diff --git a/0002-fix-issue-for-the-4th-test.patch b/0002-fix-issue-for-the-4th-test.patch new file mode 100644 index 0000000..15dbc93 --- /dev/null +++ b/0002-fix-issue-for-the-4th-test.patch @@ -0,0 +1,111074 @@ +From e1a9850ce06e32d6ea62b43000d230ab85525e57 Mon Sep 17 00:00:00 2001 +From: Che Mingdao +Date: Wed, 15 Sep 2021 22:47:45 +0800 +Subject: [PATCH] fix issue for the 4th test + +--- + adoctor-cli/adoctor_cli/commands/diag_cmd.py | 52 +- + .../adoctor_cli/commands/fault_tree_cmd.py | 12 +- + .../adoctor_cli/commands/report_cmd.py | 4 +- + adoctor-cli/adoctor_cli/commands/stat_cmd.py | 5 +- + adoctor-cli/adoctor_cli/tests/right_tree2.xls | Bin 0 -> 19968 bytes + .../adoctor_cli/tests/test_diag_cmd.py | 24 + + .../adoctor_cli/tests/test_fault_tree_cmd.py | 2 +- + .../adoctor_cli/tests/test_report_cmd.py | 4 +- + .../adoctor_diag_scheduler/function/helper.py | 35 +- + .../function/producer.py | 17 +- + .../adoctor_diag_scheduler/function/verify.py | 1 + + .../adoctor_diag_scheduler/view.py | 15 +- + aops-cli/aops_cli/base_cmd.py | 3 - + aops-cli/aops_cli/commands/task_cmd.py | 58 +- + aops-cli/aops_cli/tests/test_task_cmd.py | 26 +- + aops-database/aops_database/proxy/check.py | 2 +- + aops-database/aops_database/proxy/diag.py | 9 +- + aops-database/conf/default.json | 3 +- + .../aops_manager/config_manager/view.py | 60 +- + .../gala_spider/templates/gala-spider.conf.j2 | 2 +- + .../roles/kafka/tasks/install_kafka.yml | 5 + + .../roles/kafka/tasks/start_kafka.yml | 10 +- + .../roles/kafka/templates/kafka.service | 15 + + .../roles/zookeeper/templates/zoo.cfg | 5 +- + .../ansible_handler/vars/gala_spider_vars.yml | 2 +- + .../aops_manager/deploy_manager/view.py | 42 +- + aops-utils/aops_utils/cli_utils.py | 19 + + .../aops_utils/excel2dict/diag_tree_dict.py | 322 + + aops-utils/aops_utils/kafka/producer.py | 6 +- + aops-utils/aops_utils/restful/status.py | 4 + + .../tests/test_excel2dict/__init__.py | 0 + .../tests/test_excel2dict/data/__init__.py | 12 + + .../data/data_for_diag/__init__.py | 12 + + .../data/data_for_diag/right_tree1.json | 1 + + .../data/data_for_diag/right_tree1.xlsx | Bin 0 -> 9279 bytes + .../data/data_for_diag/right_tree2.json | 1 + + .../data/data_for_diag/right_tree2.xlsx | Bin 0 -> 9336 bytes + .../data/data_for_diag/right_tree3.json | 1 + + .../data/data_for_diag/right_tree3.xlsx | Bin 0 -> 9744 bytes + .../data/data_for_diag/wrong_tree1.xlsx | Bin 0 -> 9419 bytes + .../data/data_for_diag/wrong_tree2.xlsx | Bin 0 -> 9416 bytes + .../data/data_for_diag/wrong_tree3.xlsx | Bin 0 -> 9328 bytes + .../data/data_for_diag/wrong_tree4.xlsx | Bin 0 -> 9281 bytes + .../test_excel2dict/test_cases/__init__.py | 0 + .../test_cases/test_diag_dict.py | 64 + + aops-utils/aops_utils/time_utils.py | 6 +- + aops-web/src/api/check.js | 10 + + aops-web/src/appCore/layouts/BasicLayout.vue | 2 +- + aops-web/src/appCore/layouts/UserLayout.vue | 13 +- + .../src/appCore/locales/lang/zh-CN/menu.js | 4 +- + .../src/appCore/locales/lang/zh-CN/user.js | 10 +- + aops-web/src/assets/Loading.gif | Bin 0 -> 399082 bytes + aops-web/src/assets/horizontal-left.png | Bin 0 -> 6221 bytes + aops-web/src/assets/loginPage.png | Bin 9353 -> 37699 bytes + aops-web/src/assets/vertical-left.png | Bin 0 -> 8457 bytes + aops-web/src/assets/vertical-left.svg | 1 + + .../components/TimeScopeSelector/index.vue | 59 + + aops-web/src/components/Uploader/index.vue | 24 +- + aops-web/src/config/router.config.js | 30 +- + aops-web/src/views/assests/HostEdition.vue | 12 +- + 
aops-web/src/views/assests/HostManagement.vue | 2 +- + .../assests/components/AddHostGroupModal.vue | 2 +- + .../TranscationDomainManagement.vue | 1 + + aops-web/src/views/dashboard/Dashboard.vue | 14 +- + .../src/views/diagnosis/AbnormalCheck.vue | 292 +- + aops-web/src/views/diagnosis/DiagReport.vue | 48 +- + .../src/views/diagnosis/FaultDiagnosis.vue | 104 +- + aops-web/src/views/diagnosis/FaultTrees.vue | 22 +- + .../src/views/diagnosis/RuleManagement.vue | 99 +- + .../components/AddAbnormalCheckRuleDrawer.vue | 11 +- + .../components/AddFaultDiagnosis.vue | 16 +- + .../diagnosis/components/AddFaultTree.vue | 2 +- + .../views/diagnosis/components/FaultTree.vue | 35 +- + .../src/views/networkTopo/NetworkTopo.vue | 5 +- + aops-web/src/views/task/TaskManagement.vue | 41 +- + .../src/views/task/components/AddTask.vue | 4 +- + .../src/views/task/components/AddTemplate.vue | 10 +- + aops-web/src/views/utils/DrawerView.vue | 1 + + gala-gopher/config/gala-gopher.conf | 6 +- + gala-gopher/doc/api_doc.md | 89 + + gala-gopher/doc/design_coe.md | 21 +- + .../include/linux_4.19.90-2012.4.0.0053.oe1.h | 97760 ++++++++++++++++ + .../ebpf.probe/src/nginxprobe/nginx_link.meta | 7 +- + .../ebpf.probe/src/nginxprobe/nginx_probe.c | 5 +- + .../ragdoll/controllers/confs_controller.py | 24 +- + .../ragdoll/controllers/domain_controller.py | 14 +- + gala-ragdoll/ragdoll/controllers/format.py | 4 +- + .../ragdoll/controllers/host_controller.py | 24 +- + .../controllers/management_controller.py | 53 +- + gala-ragdoll/ragdoll/parses/ini_parse.py | 2 + + gala-ragdoll/ragdoll/test/test_collect.py | 11 + + gala-spider/README.md | 6 +- + gala-spider/config/gala-spider.conf | 4 +- + gala-spider/doc/conf_introduction.md | 4 +- + gala-spider/doc/swagger.yaml | 353 + + gala-spider/spider/controllers/gala_spider.py | 60 +- + .../spider/data_process/data_to_entity.py | 72 +- + gala-spider/spider/db_agent/db_process.py | 2 +- + 98 files changed, 99810 insertions(+), 446 deletions(-) + create mode 100644 adoctor-cli/adoctor_cli/tests/right_tree2.xls + create mode 100644 aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/templates/kafka.service + create mode 100644 aops-utils/aops_utils/excel2dict/diag_tree_dict.py + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/__init__.py + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/__init__.py + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/__init__.py + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.json + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.json + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.json + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree1.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree2.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree3.xlsx + create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree4.xlsx + create mode 100644 
aops-utils/aops_utils/tests/test_excel2dict/test_cases/__init__.py
+ create mode 100644 aops-utils/aops_utils/tests/test_excel2dict/test_cases/test_diag_dict.py
+ create mode 100644 aops-web/src/assets/Loading.gif
+ create mode 100644 aops-web/src/assets/horizontal-left.png
+ create mode 100644 aops-web/src/assets/vertical-left.png
+ create mode 100644 aops-web/src/assets/vertical-left.svg
+ create mode 100644 aops-web/src/components/TimeScopeSelector/index.vue
+ create mode 100644 gala-gopher/doc/api_doc.md
+ create mode 100644 gala-gopher/src/probes/extends/ebpf.probe/src/include/linux_4.19.90-2012.4.0.0053.oe1.h
+ create mode 100644 gala-ragdoll/ragdoll/test/test_collect.py
+ create mode 100644 gala-spider/doc/swagger.yaml
+
+diff --git a/adoctor-cli/adoctor_cli/commands/diag_cmd.py b/adoctor-cli/adoctor_cli/commands/diag_cmd.py
+index a79cb4f..d1b93fd 100644
+--- a/adoctor-cli/adoctor_cli/commands/diag_cmd.py
++++ b/adoctor-cli/adoctor_cli/commands/diag_cmd.py
+@@ -18,13 +18,13 @@ import sys
+ import time
+
+ from adoctor_cli.base_cmd import BaseCommand
+-from aops_utils.log.log import LOGGER
++from aops_utils.cli_utils import add_start_and_end, add_access_token, cli_request, request_without_print
++from aops_utils.conf.constant import DIAG_EXECUTE_DIAG, DIAG_GET_PROGRESS, DIAG_GET_REPORT_LIST
++from aops_utils.conf.constant import DIAG_GET_TASK
+ from aops_utils.restful.helper import make_diag_url
+ from aops_utils.restful.status import SUCCEED
+-from aops_utils.conf.constant import DIAG_EXECUTE_DIAG, DIAG_GET_PROGRESS, DIAG_GET_REPORT_LIST
+ from aops_utils.time_utils import time_check_generate
+ from aops_utils.validate import name_check, str_split
+-from aops_utils.cli_utils import add_start_and_end, add_access_token, cli_request
+
+ SECONDS = 5  # polling interval
+
+@@ -94,6 +94,7 @@ class DiagCommand(BaseCommand):
+         name_check(hosts)
+         name_check(trees)
+         diag_url, header = make_diag_url(DIAG_EXECUTE_DIAG)
++        times = 4
+
+         pyload = {
+             "host_list": hosts,
+@@ -101,29 +102,58 @@ class DiagCommand(BaseCommand):
+             "tree_list": trees,
+             "interval": params.interval
+         }
+-        result = cli_request('POST', diag_url, pyload, header, params.access_token)
++        result = request_without_print('POST', diag_url, pyload, header, params.access_token)
+
+         if result.get('code') != SUCCEED:
+-            LOGGER.error("diag execute error")
+             print("diag execute error: please try again")
+             sys.exit(0)
++        total = result['expected_report_num']
+
+         print("Diagnosis task start......")
+
+         task_id = result['task_id']
++        print("The task id is: ", task_id)
+         pyload = {
+             "task_list": [task_id]
+         }
++        diag_url, header = make_diag_url(DIAG_GET_TASK)
+
++        while times:
++            time.sleep(1)
++            result = request_without_print('POST', diag_url, pyload, header, params.access_token)
++            if result.get('code') != SUCCEED:
++                print("Task info query failed, please check your diag_scheduler or database.")
++                sys.exit(0)
++            if len(result.get('task_infos')) != 0:
++
++                print("The diagnosis execution will run with:\n"
++                      "hosts: {}\n"
++                      "trees: {}\n"
++                      "If there are any differences from your inputs, "
++                      "please check whether the hosts and trees are valid."
++                      .format(result.get('task_infos')[0].get('host_list'),
++                              result.get('task_infos')[0].get('tree_list')))
++
++                DiagCommand.query_diag_process(pyload, params, total)
++                print("Diagnosis task complete.")
++                diag_url, header = make_diag_url(DIAG_GET_REPORT_LIST)
++                pyload = {
++                    "task_id": task_id
++                }
++                return cli_request('POST', diag_url, pyload, header, params.access_token)
++            times -= 1
++        print("No task could be found in the diagnosis scheduler, please try again.")
++
++    @staticmethod
++    def query_diag_process(pyload, params, total):
+         finished = 0
+-        total = result['expected_report_num']
+         wait_time = 0
+         diag_url, header = make_diag_url(DIAG_GET_PROGRESS)
+
+         while finished != total:
+             try:
+                 time.sleep(SECONDS)
+-                result = cli_request('POST', diag_url, pyload, header, params.access_token)
++                result = request_without_print('POST', diag_url, pyload, header, params.access_token)
+
+                 if result.get('code') != SUCCEED:
+                     print('Execution failed, please check your connection or params.')
+@@ -145,11 +175,3 @@ class DiagCommand(BaseCommand):
+             except ConnectionError:
+                 print("Connection failed, please try again.")
+                 sys.exit(0)
+-
+-        print("Diagnosis task complete.")
+-
+-        diag_url, header = make_diag_url(DIAG_GET_REPORT_LIST)
+-        pyload = {
+-            "task_id": task_id
+-        }
+-        return cli_request('POST', diag_url, pyload, header, params.access_token)
+diff --git a/adoctor-cli/adoctor_cli/commands/fault_tree_cmd.py b/adoctor-cli/adoctor_cli/commands/fault_tree_cmd.py
+index 5917ae5..b52d663 100644
+--- a/adoctor-cli/adoctor_cli/commands/fault_tree_cmd.py
++++ b/adoctor-cli/adoctor_cli/commands/fault_tree_cmd.py
+@@ -20,9 +20,9 @@ import json
+ from adoctor_cli.base_cmd import BaseCommand
+ from aops_utils.restful.helper import make_diag_url
+ from aops_utils.conf.constant import DIAG_IMPORT_TREE, DIAG_GET_TREE, DIAG_DELETE_TREE
+-from aops_utils.readconfig import read_json_config_file
+ from aops_utils.validate import name_check, str_split
+ from aops_utils.cli_utils import cli_request, add_access_token
++from aops_utils.excel2dict.diag_tree_dict import generate_tree_dict, PaintingRuleError
+
+
+ class FaultreeCommand(BaseCommand):
+@@ -114,11 +114,15 @@ class FaultreeCommand(BaseCommand):
+             print("Please try again with valid --tree_list .")
+             sys.exit(0)
+         if params.conf is not None:
+-            conf = read_json_config_file(params.conf)
++            try:
++                json_file = generate_tree_dict(params.conf)
++            except PaintingRuleError as err:
++                print("The diag tree file import failed. 
%s" % err) ++ sys.exit(0) + else: + print('conf must be included in add command, please try again') + sys.exit(0) +- if conf is None: ++ if json_file is None: + print("The config file is None, please import a valid config file.") + sys.exit(0) + diag_url, header = make_diag_url(DIAG_IMPORT_TREE) +@@ -126,7 +130,7 @@ class FaultreeCommand(BaseCommand): + "trees": [ + { + "tree_name": trees[0], +- "tree_content": conf, ++ "tree_content": json_file, + "description": params.description + } + ] +diff --git a/adoctor-cli/adoctor_cli/commands/report_cmd.py b/adoctor-cli/adoctor_cli/commands/report_cmd.py +index a905cc3..5468395 100644 +--- a/adoctor-cli/adoctor_cli/commands/report_cmd.py ++++ b/adoctor-cli/adoctor_cli/commands/report_cmd.py +@@ -18,7 +18,7 @@ Class:ReportCommand + from adoctor_cli.base_cmd import BaseCommand + from aops_utils.restful.helper import make_diag_url + from aops_utils.conf.constant import DIAG_GET_REPORT_LIST, DIAG_DELETE_REPORT, DIAG_GET_REPORT +-from aops_utils.time_utils import time_check_generate ++from aops_utils.time_utils import time_check_generate, time_transfer + from aops_utils.validate import name_check, str_split + from aops_utils.cli_utils import add_page, cli_request, add_access_token, add_start_and_end + +@@ -108,7 +108,7 @@ class ReportCommand(BaseCommand): + dict: body of response + """ + params = kwargs.get('params') +- time_list = kwargs.get('time_list') ++ time_list = time_transfer(params.start, params.end) + pyload = { + "page": params.page, + "per_page": params.per_page +diff --git a/adoctor-cli/adoctor_cli/commands/stat_cmd.py b/adoctor-cli/adoctor_cli/commands/stat_cmd.py +index 08c3df9..efa5eef 100644 +--- a/adoctor-cli/adoctor_cli/commands/stat_cmd.py ++++ b/adoctor-cli/adoctor_cli/commands/stat_cmd.py +@@ -14,6 +14,7 @@ + Description: statistics method's entrance for custom commands + Class:StatCommand + """ ++import sys + + from adoctor_cli.base_cmd import BaseCommand + from aops_utils.conf.constant import CHECK_COUNT_RULE, CHECK_COUNT_RESULT +@@ -77,7 +78,9 @@ class StatCommand(BaseCommand): + Returns: + dict: body of responese + """ +- ++ if params.field is None: ++ print("please input the field of the statistics, using --field .") ++ sys.exit(0) + pyload = {} + if params.field == 'check_rule': + check_url, header = make_check_url(CHECK_COUNT_RULE) +diff --git a/adoctor-cli/adoctor_cli/tests/right_tree2.xls b/adoctor-cli/adoctor_cli/tests/right_tree2.xls +new file mode 100644 +index 0000000000000000000000000000000000000000..3c972848f2631d1e1754587ad80920f53e95d970 +GIT binary patch +literal 19968 +zcmeHP2UJu?_n)^cur%piVMUPMn0vEjg8~%$dpeKWh)X^rz +zMrU=8*u6|?~(I?DMGMJ>3K_rR9lMIqbQb{tA|5KqAl_8!% +z84g1uYXNTzv86JZ(YralZ>1dj>AeLp18zd{hsF9dp>Krrl7v&f0(y6%_j+(=$R3*K +z8m0IX2l4`P$jDr5&}0hHWgXqms7#C`PbGj?_BUt)5yr9HgE^o0t>EiL4?{Fl>0i)rk0!=HO1efl`y|APt70 +z#zu_2FX;x3CW-V{lQER&f?udrjt`Wjt~55yK^g(w$U%Kp%_Van$;1N+V&arqc@Q~I +zU!?PO7E7&}*NGyr`S2c2@a`b(PTYwrTaXwCnpKRBnUSszb_7&S6ZP`5YO>O(YOZEM +zd>-UvYgbih7zv{~G^ck?pG4wGe8u$P!`0S`pITEl@nU?zdsq^tA91cxEAF&b+@MkM +z>HWaQ4$`L3679sZ@Y{rV=H}2E?S+{+p~XvOQ`xfHK*@4V*)>U^0=f-e6~k)7J|N##gqAWrG8Fa?8VIu1o*{Oah4D8~Q4OlJts&Jxz1GR_X)SKD7|MeT2(hF+WB +zi}Kf|H&uhBXO*-)E9+669c(B+_Pm<>-fHBrqjYRJl>ZWm50{77b9S@j=nG6u4>=xk +zBvL)fuLB;N^8Z=0U)Q!X$6t-V%+%D%z{-F~*>bIT_+J@(1$5S)rW_so1K)2Lb99i0 +zd#=oz23d8%X|sk63ky@Kqc1{30nikDKF%xfX*jka-^-UT +zm3Um`@%gGvW-AcN9kMd&I6w7W5*7qn4H>t8mBg& +z#;MJxacV0RrAoYN6@_;90s&NZ&&$hGX5a+i(>MY6G)@3MO({U>5+$C>?jx17YN3QP +z8E+6uHIuEPY^AEoqHyyh)vc+dXG@AU;xqB 
+zPJ|+b@I?}$g(}ggmxcKS5NJ@-Oq)QW%B=Y$UzkB?)~1j_XkZwk&>@}SP%@JYrKfg)L*`2yVNcG8Fd?wcbs=)t +zmzI+VIJZoMj4ok>iaQGw_gCl&jKfJ~p-EXWseCmk%Sq)3Tk>GT05vuggY^Me_57T) +zR+?-WBw|A^4K~ElfVH8u(1s|X(1zXu8>*DG(PTq0sXR5f +z2Mq>pP!dOJ +zwh!0WRHFeRu+AE46lV}@jW!V44kZ-Y&P8B5l~fLzlogYzkp^W|QaK8xLMWkB4Fyth +z*&V^bX{Z8!5nDNGkP4?~Y!x&_Sap3*4Auc))$_Bk=EO;;EJ6vDZ6HvVlgb=U^T4ll +zh6XzVoOveo`?az&^&xvr9!@D|P3=ldDY=Gr#YtrVBaIeeT7fi4piF(Zusk*}jJ-mO +zuxUua5vs8!(~5Zp4-KZ}y0$TKh0PKs7DmMEYP-m5N}^?}&?c&MRvJ^{2CF`%3pMC4 +zdapIqN-SZh<#VuErNlj7sz8N2G$|zJ1sZEmi0f}ga8xYC%0`8fK^2FZ>PYkzQaS;^ +zR8}HmI3xiZM8QzcsUom}!~$%fN@ufK!!$vUFzo>ctC0QTNe+YeJega~GPTU%^SIi4 +zEQ~$zLFdYTLzD=nOar)VfXe`RYCBgeaIWIKESwP>l!N~5AbSRFSOWbSJJ6p?3sEC<}BAn%2;mapeRHO~b0Yjo1=~NY*Y>fto6APBg0oCTon$)z9Bv@ezN_{LJ +zCvAY3wAc-W(wc}!%XL2~3C!1_%`iw)GZdTo1!!o7xIV+~YD|LJel-{rPNNM`(5AGE +zX}{vq*=$l0lCKL=H6WqzMHTZo*e?^T$h9q%1$_h-G*B24b=(|ulYBj@a8peRiw#Il +z8WiS66e($zF9UdM_*g=uS0JpgdQ3&CM+Ns;z81jQk{0+uVI=*8Fp&&_W~7>UcnFMO +ztAko&TJ|EgG4T~izq27TV8(U{@sb8 +z=WbbC{<@=4Z?9##Ml+Jn`b^vNVw~4H`}Nn_*FWx+R#9U4;Bk)4sqYKSZ$+KV>YCf= +z^4!Va?YbfVd6(zqhR?lj4A|@ZMT?p3K7Vxajr*N8PdppzOPaiAP(hX0ozn^$zkm$| +zPdH{lZ;u=Q*|e;;U=PKlaym +zPHwkH-qccC-=h7@e)<*%Ud31#ohL@uA{8C`JGZ~+c%k8_T~00@;}1^_n)YLszUR +z$lN=igGp4bZyUWI4m7nZBxCaq{dxDbaTmpyKV7QcmH%$jU;oQ)?w7*DN5{1}ylBOV +zDR*bInzkghdu0QsYcCu2J2_kD&o5uKm@#RRz4xKbVdDqC>!3IEe%shB+k>Bu`D=dp +zeVvM&$dY>(w#oW@_w&Zr-yA)p6SH^5laPI{(+|8@$GFJ`uC=u;AC$PN|Jm(2Zts45 +zabs|Hh^N)f7l-y99ldVt2e17)$saa({yAW)QSa|x*e$3S;`!&b^sGIJ@2(UX#>E{8 +zJ>C49FOI$38sYEqI7hqxmdl~#zhAYQYr1M|=bW8Ap9P0xJ~Xh{JSAbo29rjMPLCMk +z9r4EYITOyf&vSl#FKt3QxBTPR+b%F{`r98-79A~hhQ=pd|7nV3d%$X^F;2IWocewk +zWG1w-_1Z<6x0LHy-833^??#GV +zbj+ZV2NS1OrCCk1F+C$We!IM4pJ$>$PU4jH276{cv$%P`sFBa@U5|5T4hehEJ*s}; +z){^PkpADQ<8BiI0c!cGdlP?A?ZCiPJ+!uG!8)ZM5aii^q9=9vAC6W&Pe9HZIr`}xY +zw$SaU`=W36m7K6?o-%5ViRV^b=iAZS3SU*4W>}jJ>Y$U9>b-jM1&632pFryfw_OXr +zo>+X-XVsS8qh72im{cO^s{58%+tY6AiVdx&pSe$5E=B*@a^CitNqHx<%&%CQc{GSW +zy+3{0T?CXTY)2K8N1hHfXrH +zht8!NX3r+AcJ;`ys<^YO{76+oZ27ZaPQN>|^YQC?uIJXJWJImGQt@Hxwd)ZVUs+8l +ziQg5PJNJQc+0#RB%Jx_GKA00RAl2{W&THB!?}i?D*ZJr4gY71j9bDFRzwfILeZwE> +zjrCjq^wnE)>#5tL4C5}(H)Oo}{aCpBwl9=Wq4GtBq0zClsXSnb_y9n#kK8ULMratxwpKTN6J;uXww2z@(!I_N2;WztCys;zX{H0j^rDNK8A3E53 +zqKkP^)EUPkR~sB`Y~HJRtD%EU3vRdQB_I9A;pOdmH|}?`p4sq4!@Qd>GJpyz1gMZms3H;%ik^ +z1Cvj!jj0@8-(hrQv-^8Yy|(vRwbIPSjPcF!=p7O%dG2h`uR*VO^>TK?jL|w*x(zsg;qx~2W^EdGZf;B?m(h*tx7~PTY1FQLgD(UJ)EiPBX&Y#g$$N9hc|46TlPKZ~0m+Usn0!)IkrUo6S??UAp&Ly9p6PO-om*Dv!G5#W8VLD{aLr0GaM=mbtgXjwQAs= +z{a?hV2HpDn*XSp={7P?G|MH^cw062T?{(f_u_Qe8{2zJh2SFSt<1&-evgkmm8b{Xr +zyzb#XNDE;IOshE_ALN`o=cg}@`58`qKKa1J3%Om(j@h`3^j$MI(sxxZsc_3nb+RwF +ze^TD1L`T0^HfiX#S|wdLWm49mO?gm)bKbhb;c-_S7k_Je +zJ0$wzYpZToLI-Y{v;LG~=P|$1Z@V1H+v;_{jWj)ULG<8z2`kq7-tB&1?)v17mmJRT +z`r+-*>`*B_UoaC4m0~6_x-fV7R^YP-li=|ue`sa-h4X@Hb9K@`>Sr +zObQ#{n0EfVOInle1sjEgId^^f%xH_3%x7x+!6k!>zxw8sd1Am#|J03kvSyDL4qtj? 
+z=nqe4hj+8;JpF3(q-_Hy%+3gXW;v*^qg$uO&fVO6{1<1|bL}vH+4&iS=(*p +ziPGDRzTOxuJ>7TTz8`PZoKDC$JD0&XZ=^n)gqXv#Z)8S#c5-@lTr0jZOda__F+^y=LM_u|VQ{xMn;T{pjC<`1~?mG%7xCVuXZ +z*Tz2nw&``-`Dt?+t-h}7yy5$nvk&hm_(pGFSW#qFe~Zu)V-lRt<(xi0;%&Znnr_cI +zW1F^I+PeO{o^MKKnr|vIIeOb`+v24Sf^WH9ZFO=@*1`E>tjiXrre-bj>oH!(D|c(a +z(nof&876PF+g|YVn)Bnz6wu$uyN +zb)lVFLaPrIs6wnzies|E66T2E4FsC0(>G?Uu)f9F;%+K0(plE*4 +zj>rn7sxaYHSp}*MJ)AETeWw#=A%<|3pu*@3ueC6EYlfJP+O(4OV3`*Ny(^bCgjw+8 +zuvvmt&TLsPGMh)63&^JM7YnP}1iC2h4vXW_^sN&t+HenL1RTo*kQjIyNq2Bm9Jr%A +z2o`tQu+0!c1~PcWIT?;^*5e^2kusP&9ypfDY~Vr~8^9zqAZZqCjr4@SBa93#LJEQ; +zq>2At4BX +z@uw+SpwkFg(c`&N7OdBp7>M9b58lxf)uas2Ba`xEVb<1k8zq^bF>pg58<)f!u`Oi9 +z9fB;d4PRmtsuH3N3O +zOqiWtyaH@tPVU0K3-)Ymi|p?dO=|p3(EvOi7x1Xw1)R(dI94`-<4*>1N}%x>(|1$2 +z6KGa4Qnlf?vQ7yxVR4l1LFu0KK7ms5frkq&c-~0dm@gsa2wl}$LVP0So<#4BfjfnU +z2Vt(fcQ+-+`p3G}KpHNFaZiRUlQo8mlcM%hCKFR;09 +zOL2ZAlb|=lxso44Ul?Ob!-$ZEmN5V=`F!{pg^-4}5Yq5l0U-^w64IbzYSB}qSjv|@F6b>+=|}W7y~iXg^fudhFY*O+;vC!*_ae!C^;L`h8W7s +z#&jTt(y}pKh@qTpOa?J*XKYLlV%Rd-m_EeN@)$!YjA_fo7#s&c48M8D7<@bpF|03) +z!S~M)!vlMa=>V@W{T&I$bZN}AI!#77akRPuJ1N&e`6AfhUJ6L6zm^g+vKtGhHcY}7 +z2n_&JOGcGqKpA+`T>@Z4?>-d5);e~vK{9K&*erO5E`(n*XtiehpDN+sxBn#zpkHNl +zi1AX%Wo-7ewZAiNEXgRjGN?6t#FGatMx2tOzg-3wdd&53LCC)cH*}BNAQS3?Ubqj;hVU1#a?%@~ +z>D7}M{5J=6;|T{o^2?}4S7u*f@I9XiDox#Zg+Y8A!Ell1vIoD +z+7WH)1{aQFXltB9(0Mqh6%Oy5?Jyk!_vGYV05rtGfH;WP4=&7?1Q&#SEDz~O!+afK +z{@VqX256xqsDVVVNh^5bukviEjAiFoZU+ae<#GWD|Dj9nX^Me1~61z~`rI-$5Dbnz0oc3qmJ4E^uYz +zUI5x7nN6(29sW&$(*B=PJ_oH0{uj4r@Vi;8`zUAyDO3ieaO-uByY|x!^he);ext7C +z 0) + + + class GetProgressSchema(Schema): +diff --git a/adoctor-diag-scheduler/adoctor_diag_scheduler/view.py b/adoctor-diag-scheduler/adoctor_diag_scheduler/view.py +index 91eb93d..5cc9b1b 100644 +--- a/adoctor-diag-scheduler/adoctor_diag_scheduler/view.py ++++ b/adoctor-diag-scheduler/adoctor_diag_scheduler/view.py +@@ -21,7 +21,7 @@ from kafka.errors import KafkaError + + from aops_utils.restful.helper import make_datacenter_url + from aops_utils.restful.response import MyResponse +-from aops_utils.restful.status import StatusCode, SUCCEED, SERVER_ERROR ++from aops_utils.restful.status import StatusCode, SUCCEED, SERVER_ERROR, TASK_EXECUTION_FAIL + from aops_utils.log.log import LOGGER + from aops_utils.kafka.kafka_exception import ProducerInitError + from aops_utils.conf.constant import DATA_ADD_DIAG_TREE, DATA_GET_DIAG_TREE, \ +@@ -157,12 +157,15 @@ class ExecuteDiag(Resource): + try: + producer = Producer(diag_configuration) + task_id, jobs_num = producer.create_msgs(args) +- LOGGER.info("%d kafka messages created." 
% jobs_num) +- response = StatusCode.make_response(SUCCEED) +- response["task_id"] = task_id +- response["expected_report_num"] = jobs_num + +- except (ProducerInitError, KeyError, KafkaError) as err: ++ if not task_id: ++ response = StatusCode.make_response(TASK_EXECUTION_FAIL) ++ else: ++ response = StatusCode.make_response(SUCCEED) ++ response["task_id"] = task_id ++ response["expected_report_num"] = jobs_num ++ ++ except (KeyError, KafkaError, ProducerInitError) as err: + LOGGER.error(err) + response = StatusCode.make_response(SERVER_ERROR) + response["task_id"] = None +diff --git a/aops-cli/aops_cli/base_cmd.py b/aops-cli/aops_cli/base_cmd.py +index 78c3bd7..1a624af 100644 +--- a/aops-cli/aops_cli/base_cmd.py ++++ b/aops-cli/aops_cli/base_cmd.py +@@ -14,13 +14,10 @@ + Description: Base method for custom commands + Class: BaseCommand + """ +-import sys + import argparse + from collections import namedtuple + from abc import abstractmethod + +-from aops_utils.restful.response import MyResponse +- + + class BaseCommand: + """ +diff --git a/aops-cli/aops_cli/commands/task_cmd.py b/aops-cli/aops_cli/commands/task_cmd.py +index 4480125..43e4290 100644 +--- a/aops-cli/aops_cli/commands/task_cmd.py ++++ b/aops-cli/aops_cli/commands/task_cmd.py +@@ -20,6 +20,8 @@ from aops_cli.base_cmd import BaseCommand + from aops_utils.validate import name_check, str_split + from aops_utils.conf.constant import GENERATE_TASK, DELETE_TASK, GET_TASK, EXECUTE_TASK + from aops_utils.restful.helper import make_manager_url ++from aops_utils.restful.response import MyResponse ++from aops_utils.restful.status import SUCCEED + from aops_utils.cli_utils import add_page, cli_request, add_access_token, add_query_args + + +@@ -89,9 +91,9 @@ class TaskCommand(BaseCommand): + + action_dict = { + 'generate': self.manage_requests_generate_task, +- 'execute': self.manage_requests_query_delete_execute, +- 'delete': self.manage_requests_query_delete_execute, +- 'query': self.manage_requests_query_delete_execute ++ 'execute': self.manage_requests_execute, ++ 'delete': self.manage_requests_query_delete, ++ 'query': self.manage_requests_query_delete + } + kwargs = { + "action": action, +@@ -130,9 +132,9 @@ class TaskCommand(BaseCommand): + return cli_request('POST', manager_url, pyload, header, params.access_token) + + @staticmethod +- def manage_requests_query_delete_execute(**kwargs): ++ def manage_requests_query_delete(**kwargs): + """ +- Description: Executing query or delete or excute request ++ Description: Executing query or delete request + Args: + params: Command line parameters + action: task action +@@ -146,7 +148,6 @@ class TaskCommand(BaseCommand): + + name_check(task_ids) + url_dict = { +- 'execute': [make_manager_url(EXECUTE_TASK), 'POST'], + 'delete': [make_manager_url(DELETE_TASK), 'DELETE'], + 'query': [make_manager_url(GET_TASK), 'POST'] + } +@@ -164,3 +165,48 @@ class TaskCommand(BaseCommand): + pyload['page'] = params.page + pyload['per_page'] = params.per_page + return cli_request(url_operation, manager_url, pyload, header, params.access_token) ++ ++ @staticmethod ++ def manage_requests_execute(**kwargs): ++ """ ++ Description: Executing execute request ++ Args: ++ params: Command line parameters ++ action: task action ++ Returns: ++ dict: response of the backend ++ Raises: ++ """ ++ params = kwargs.get('params') ++ task_ids = str_split(params.task_list) ++ name_check(task_ids) ++ manager_url, header = make_manager_url(GET_TASK) ++ pyload = { ++ "task_list": task_ids, ++ 'page': params.page, ++ 'per_page': 
params.per_page
++        }
++        header['access_token'] = params.access_token
++        task_response = MyResponse.get_response('POST', manager_url, pyload, header)
++        if task_response.get('code') != SUCCEED:
++            print("There is no such task in the system, please try again.")
++            sys.exit(0)
++        for task_info in task_response.get('task_infos'):
++            desc = task_info.get('description')
++            host_list = []
++            for host in task_info.get('host_list'):
++                host_list.append(host.get('host_name'))
++            print("\n{}\n This task may change your previous configuration.\n".format(desc))
++            print("The following hosts will be involved:")
++            print(host_list)
++            while True:
++                check = input("Please check if you want to continue y/n: ")
++                if check in ('y', 'Y'):
++                    manager_url, header = make_manager_url(EXECUTE_TASK)
++                    pyload = {"task_list": [task_info.get('task_id')]}
++                    cli_request('POST', manager_url, pyload, header, params.access_token)
++                    break
++                if check in ('N', 'n'):
++                    break
++                print("Unknown command, please try again with Y/y or N/n.")
++        print("\nDone.")
+diff --git a/aops-cli/aops_cli/tests/test_task_cmd.py b/aops-cli/aops_cli/tests/test_task_cmd.py
+index f85ad9b..9df3f59 100644
+--- a/aops-cli/aops_cli/tests/test_task_cmd.py
++++ b/aops-cli/aops_cli/tests/test_task_cmd.py
+@@ -120,21 +120,35 @@ class TestTaskCli(unittest.TestCase):
+         args_dict['per_page'] = 20
+         self.assertEqual(args_dict, mock_get_response.call_args_list[0][0][2])
+
+-    def test_execute_task(self):
++    @mock.patch('builtins.input')
++    def test_execute_task(self, mock_input):
+         print("Execute the execute task test case")
+         cmd = TaskCommand()
+         args = cmd.parser.parse_args(['task',
+                                       '--action=execute',
+                                       '--task_list=t1',
+                                       "--access_token=123321"])
++        mock_input.return_value = 'y'
+         with mock.patch.object(MyResponse, "get_response") as mock_get_response:
+-            expected_res = {
+-                "code": 200,
+-                "msg": 'operation succeed'
+-            }
+-            mock_get_response.return_value = expected_res
++            mock_get_response.side_effect = [
++                {
++                    "code": 200,
++                    "msg": 'operation succeed',
++                    'task_infos': [{
++                        "task_id": "id1",
++                        "host_list": [{"host_name": "host1"}]
++                    }
++                    ]
++                },
++                {
++                    "code": 200,
++                    "msg": 'operation succeed'
++                }
++            ]
+             cmd.do_command(args)
+             args_dict = dict()
+             args_list = str_split(vars(args)['task_list'])
+             args_dict['task_list'] = args_list
++            args_dict['page'] = 1
++            args_dict['per_page'] = 20
+             self.assertEqual(args_dict, mock_get_response.call_args_list[0][0][2])
+diff --git a/aops-database/aops_database/proxy/check.py b/aops-database/aops_database/proxy/check.py
+index 6a7394b..ccfdc50 100644
+--- a/aops-database/aops_database/proxy/check.py
++++ b/aops-database/aops_database/proxy/check.py
+@@ -427,7 +427,7 @@ class CheckDatabase(ElasticsearchProxy):
+         flag, total_page, res = self._query_or_scan(
+             CHECK_RESULT_INDEX, query_body, total_count, data,
+             ['check_item', 'data_list', 'condition',
+-             'value', 'host_id', 'start', 'end'])
++             'description', 'host_id', 'start', 'end'])
+
+         if res[0]:
+             LOGGER.info("query check result succeed")
+diff --git a/aops-database/aops_database/proxy/diag.py b/aops-database/aops_database/proxy/diag.py
+index b490c07..0ef6613 100644
+--- a/aops-database/aops_database/proxy/diag.py
++++ b/aops-database/aops_database/proxy/diag.py
+@@ -457,7 +457,8 @@ class DiagDatabase(ElasticsearchProxy):
+             data(dict): e.g. 
+ { + "username": "admin", +- "status": "finshed", ++ "time_range": [1, 2], ++ "task_list": [], + "sort": "tree_name", + "direction": "asc", + "page": 1, +@@ -511,7 +512,11 @@ class DiagDatabase(ElasticsearchProxy): + """ + query_body = self._general_body(data) + time_range = data.get('time_range') +- if time_range and len(time_range) == 2: ++ task_list = data.get('task_list') ++ if task_list: ++ query_body["query"]["bool"]["must"].append( ++ {"terms": {"task_id": task_list}}) ++ elif time_range and len(time_range) == 2: + query_body["query"]["bool"]["must"].extend( + [{"range": { + "time": {"gte": time_range[0]} +diff --git a/aops-database/conf/default.json b/aops-database/conf/default.json +index 3b53633..14624d3 100644 +--- a/aops-database/conf/default.json ++++ b/aops-database/conf/default.json +@@ -4,6 +4,7 @@ + "task_id": "95c3e692ff3811ebbcd3a89d3a259eef", + "task_name": "Default deployment", + "username": "admin", ++ "description": " The default task for installing: zookeeper, kafka, prometheus, node_exporter, mysql, elasticsearch, fluentd, gala-spider, gala-gopher, gala-ragdoll.\n", + "host_list": [ + { + "host_name": "90.90.64.64", +@@ -20,4 +21,4 @@ + ] + } + ] +-} +\ No newline at end of file ++} +diff --git a/aops-manager/aops_manager/config_manager/view.py b/aops-manager/aops_manager/config_manager/view.py +index 4538fbc..fffd26d 100644 +--- a/aops-manager/aops_manager/config_manager/view.py ++++ b/aops-manager/aops_manager/config_manager/view.py +@@ -53,12 +53,12 @@ def traversal_ansible_output(status, **kwargs): + fail_files = kwargs.get('fail_files') + success_files = kwargs.get('success_files') + infos = kwargs.get('infos') +- public_ip = str(info['public_ip']) +- path = os.path.join('/home/dest/', public_ip) ++ host_name = str(info['host_name']) ++ path = os.path.join('/home/dest/', host_name) + if not res_data[status]: + return + for host in res_data[status].keys(): +- if host != public_ip: ++ if host != host_name: + continue + host_res = res_data[status][host] + if host_res['task_name'] == "Check that if the file exists": +@@ -148,31 +148,43 @@ def generate_ansible_input_json(host_infos, inventory, params): + host_infos(list): host infos get from database + inventory(InventoryBuilder): Inventory class + params(dict): params of the requests +- + Returns: +- ++ dict: The ansible output + """ + ansible_input_json = {'read_config_hosts': {}} + for info in host_infos: + + # move host dir to vars + inventory.move_host_vars_to_inventory(configuration.manager.get('HOST_VARS'), +- str(info['public_ip'])) ++ str(info['host_name'])) + + # read_config.json generate +- ansible_input_json_host = {'ansible_host': info['public_ip'], +- 'ansible_python_interpreter': '/usr/bin/python3', +- 'config_list': []} +- for item in params['infos']: +- if item['host_id'] == info['host_id']: +- for config in item['config_list']: +- ansible_input_json_host['config_list'] \ +- .append({"src": config, "dest": "/home/dest"}) +- break +- ansible_input_json['read_config_hosts'][info['public_ip']] = ansible_input_json_host ++ ansible_input_json_host = generate_host_dict(info, params) ++ ansible_input_json['read_config_hosts'][info['host_name']] = ansible_input_json_host + return ansible_input_json + + ++def generate_host_dict(info, params): ++ """ ++ Generate ansible host dict ++ Args: ++ info(dict): info of a host ++ params(dict): params of the requests ++ Returns: ++ dict: dict of ansible host ++ """ ++ ansible_input_json_host = {'ansible_host': info['public_ip'], ++ 'ansible_python_interpreter': 
'/usr/bin/python3', ++ 'config_list': []} ++ for item in params['infos']: ++ if item['host_id'] == info['host_id']: ++ for config in item['config_list']: ++ ansible_input_json_host['config_list'] \ ++ .append({"src": config, "dest": "/home/dest"}) ++ break ++ return ansible_input_json_host ++ ++ + def generate_yaml_vars(): + """ + generate yaml vars +@@ -211,21 +223,13 @@ class CollectConfig(Resource): + def post(): + """ + Get config +- + Args: + request(json): { +- "infos": [ +- { +- "host_id": "f", +- "config_list": ["/xx", "/exxxo"] +- }, +- { +- "host_id": "f", +- "config_list": ["/exc/hoxxame"] +- } +- ] ++ "infos": [{ ++ "host_id": "f", ++ "config_list": ["/xx", "/exxxo"] ++ }] + } +- + Returns: + dict: response body + """ +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/gala_spider/templates/gala-spider.conf.j2 b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/gala_spider/templates/gala-spider.conf.j2 +index 5c81be0..e039f91 100644 +--- a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/gala_spider/templates/gala-spider.conf.j2 ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/gala_spider/templates/gala-spider.conf.j2 +@@ -11,7 +11,7 @@ broker = + + [table_info] + base_table_name = ["tcp_link", "lvs_link"] +-other_table_name = ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] ++other_table_name = ["nginx_link" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] + + [option] + exclude_addr = {{ exclude_addr }} +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/install_kafka.yml b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/install_kafka.yml +index 973edae..9a90ca7 100644 +--- a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/install_kafka.yml ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/install_kafka.yml +@@ -20,6 +20,11 @@ + name: + - kafka + ++- name: Copy kafka.service file ++ become: true ++ become_user: root ++ template: src=kafka.service dest=/usr/lib/systemd/system/kafka.service owner=root group=root mode=0644 backup=yes ++ + - name: Create Data Dir + become: true + become_user: "{{ user }}" +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/start_kafka.yml b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/start_kafka.yml +index 15bcf47..313ef74 100644 +--- a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/start_kafka.yml ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/tasks/start_kafka.yml +@@ -1,8 +1,10 @@ +-# Checking the Service Status +-- name: Start Kafka +- shell: "cd {{ install_dir }}/bin && ./kafka-server-start.sh -daemon ../config/server.properties" +- become: yes ++#Start kafka service ++- name: Start kafka ++ become: true + become_user: "{{ user }}" ++ service: ++ name: kafka ++ state: restarted + + # Checking the Service Status + - name: Verify kafka is listening on {{ kafka_port }} +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/templates/kafka.service b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/templates/kafka.service +new file mode 100644 +index 0000000..cfa9840 +--- /dev/null ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/kafka/templates/kafka.service +@@ -0,0 +1,15 @@ ++[Unit] ++Description=Apache Kafka server (broker) 
++After=network.target zookeeper.service ++ ++[Service] ++Type=simple ++Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/java/jdk/bin" ++User={{ user }} ++Group={{ group }} ++ExecStart={{ install_dir }}/bin/kafka-server-start.sh {{ install_dir }}/config/server.properties ++ExecStop={{ install_dir }}/bin/kafka-server-stop.sh ++Restart=on-failure ++ ++[Install] ++WantedBy=multi-user.target +\ No newline at end of file +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/zookeeper/templates/zoo.cfg b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/zookeeper/templates/zoo.cfg +index bc805f4..a02ae70 100644 +--- a/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/zookeeper/templates/zoo.cfg ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/roles/zookeeper/templates/zoo.cfg +@@ -28,6 +28,7 @@ autopurge.snapRetainCount=100 + autopurge.purgeInterval=24 + + #Service +-{% for ip in groups.zookeeper_hosts %} +-server.{{ loop.index - 1 }}={{ ip }}:{{ leader_port }}:{{ vote_port }} ++{% for host in groups.zookeeper_hosts %} ++{% set zookeeper_ip = hostvars[host].ansible_host -%} ++server.{{ loop.index - 1 }}={{ zookeeper_ip }}:{{ leader_port }}:{{ vote_port }} + {% endfor %} +diff --git a/aops-manager/aops_manager/deploy_manager/ansible_handler/vars/gala_spider_vars.yml b/aops-manager/aops_manager/deploy_manager/ansible_handler/vars/gala_spider_vars.yml +index 974c8c9..d69afb1 100644 +--- a/aops-manager/aops_manager/deploy_manager/ansible_handler/vars/gala_spider_vars.yml ++++ b/aops-manager/aops_manager/deploy_manager/ansible_handler/vars/gala_spider_vars.yml +@@ -16,7 +16,7 @@ exclude_addr: ["1.2.3.4"] + + # basic table name, please don't delete originally contains in the list. 
And append items in order if necessary:
+ base_table_name: ["tcp_link", "lvs_link"]
+-other_table_name: ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_link"]
++other_table_name: ["nginx_link" , "lvs_link" , "haproxy_link" , "dnsmasq_link"]
+
+ # gala-spider listening port
+ gala_spider_port: 11115
+diff --git a/aops-manager/aops_manager/deploy_manager/view.py b/aops-manager/aops_manager/deploy_manager/view.py
+index c34640b..b576e57 100644
+--- a/aops-manager/aops_manager/deploy_manager/view.py
++++ b/aops-manager/aops_manager/deploy_manager/view.py
+@@ -19,9 +19,10 @@ import uuid
+ from flask import request
+ from flask import jsonify
+ from flask_restful import Resource
++import threading
+
+ from aops_utils.log.log import LOGGER
+-from aops_utils.restful.status import StatusCode, SUCCEED, PARAM_ERROR
++from aops_utils.restful.status import StatusCode, SUCCEED, PARAM_ERROR, TASK_EXECUTION_FAIL
+ from aops_utils.restful.response import MyResponse
+ from aops_utils.restful.helper import make_datacenter_url
+ from aops_utils.conf.constant import DATA_ADD_TASK, DATA_ADD_TEMPLATE, DATA_DELETE_TASK,\
+@@ -160,9 +161,6 @@ class ExecuteTask(Resource):
+         task_list = args.get('task_list')
+         LOGGER.info("Start run task %s", task_list)
+
+-        succeed_list = []
+-        fail_list = []
+-
+         database_url = make_datacenter_url(DATA_GET_TASK)
+         pyload = {
+             "task_list": task_list,
+@@ -177,24 +175,30 @@ class ExecuteTask(Resource):
+             if not task_info.get('host_list'):
+                 return StatusCode.make_response(PARAM_ERROR)
+             for host in task_info['host_list']:
+-                print(configuration.manager.get('HOST_VARS'), host['host_name'])
++                LOGGER.info("%s %s", configuration.manager.get('HOST_VARS'), host['host_name'])
+                 inventory.move_host_vars_to_inventory(configuration.manager.get('HOST_VARS'),
+                                                       host['host_name'])
+-            res = TaskRunner.run_task(task_id, HostKey.key)
+-            if res:
+-                succeed_list.append(task_id)
+-                LOGGER.info("task %s execute succeed", task_id)
+-                inventory.remove_host_vars_in_inventory()
+-                continue
+-            else:
+-                fail_list.append(task_id)
+-                LOGGER.warning("task %s execute fail", task_id)
+-
+-        response = StatusCode.make_response(SUCCEED)
+-        response['succeed_list'] = succeed_list
+-        response['fail_list'] = fail_list
++            task_thread = threading.Thread(target=ExecuteTask.task_with_remove,
++                                           args=(task_id, inventory))
++            task_thread.start()
++            if task_thread.is_alive():
++                response = StatusCode.make_response(SUCCEED)
++                return jsonify(response)
++            response = StatusCode.make_response(TASK_EXECUTION_FAIL)
++            return jsonify(response)
+
+-        return jsonify(response)
++
++    @staticmethod
++    def task_with_remove(task_id, inventory):
++        """
++        Execute a task and remove related files after execution.
++        Args:
++            task_id(str): id of a task. 
++ Returns: ++ bool: The execution flag of the task ++ """ ++ res = TaskRunner.run_task(task_id, HostKey.key) ++ inventory.remove_host_vars_in_inventory() ++ return res + + + class ImportTemplate(Resource): +diff --git a/aops-utils/aops_utils/cli_utils.py b/aops-utils/aops_utils/cli_utils.py +index fd9831a..33d8d8c 100644 +--- a/aops-utils/aops_utils/cli_utils.py ++++ b/aops-utils/aops_utils/cli_utils.py +@@ -62,6 +62,25 @@ def cli_request(action, manager_url, pyload, header, access_token=None): + return result + + ++def request_without_print(action, manager_url, pyload, header, access_token=None): ++ """ ++ cli request without print ++ Args: ++ action(str): actions of requests ++ manager_url(str): route ++ pyload(dict): request body ++ header(dict): request header ++ access_token(str): access token of users ++ ++ Returns: ++ json: response of manager ++ """ ++ if access_token is not None: ++ header['access_token'] = access_token ++ result = MyResponse.get_response(action, manager_url, pyload, header) ++ return result ++ ++ + def add_query_args(sub_parse, item_list): + """ + Add query args of the sub parse. +diff --git a/aops-utils/aops_utils/excel2dict/diag_tree_dict.py b/aops-utils/aops_utils/excel2dict/diag_tree_dict.py +new file mode 100644 +index 0000000..1ff2353 +--- /dev/null ++++ b/aops-utils/aops_utils/excel2dict/diag_tree_dict.py +@@ -0,0 +1,322 @@ ++#!/usr/bin/python3 ++# ****************************************************************************** ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# licensed under the Mulan PSL v2. ++# You can use this software according to the terms and conditions of the Mulan PSL v2. ++# You may obtain a copy of Mulan PSL v2 at: ++# http://license.coscl.org.cn/MulanPSL2 ++# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++# PURPOSE. ++# See the Mulan PSL v2 for more details. ++# ******************************************************************************/ ++""" ++A tool to transfer diagnostic tree's info from excel to dict ++Needed files: xxxx.xlsx exported from MindMaster ++""" ++ ++import xlrd ++ ++ ++CONDITION_PRE = "condition:" ++ADVICE_PRE = "advice:" ++DESCRIPTION_PRE = "description:" ++ ++ ++class PaintingRuleError(Exception): ++ """ ++ Self-defined Exception class ++ """ ++ def __init__(self, error_info=''): ++ """ ++ Init PaintingRuleError exception. 
++ ++ args: ++ error_info (str): Exception's error info ++ """ ++ Exception.__init__(self) ++ self.message = error_info ++ ++ def __str__(self): ++ """ ++ string of the exception ++ ++ Returns: ++ str ++ """ ++ return self.message ++ ++ ++class DiagBook: ++ """ ++ Class of diag excel workbook which contains multiple sheets ++ """ ++ def __init__(self, tree_excel_path): ++ """ ++ Init DiagBook object ++ ++ Args: ++ tree_excel_path (str): diagnose tree excel's path ++ """ ++ self.tree_workbook = xlrd.open_workbook(tree_excel_path) ++ self.diag_table = self.get_diag_table() ++ ++ def get_diag_table(self): ++ """ ++ Check the workbooks meets the standard or not ++ ++ Returns: ++ xlrd.sheet.Sheet: diag sheet ++ """ ++ if "diag" not in self.tree_workbook.sheet_names(): ++ raise PaintingRuleError("'diag' sheet is not in the Mind master's diag tree excel.") ++ return self.tree_workbook.sheet_by_name("diag") ++ ++ ++class DiagNode: ++ """ ++ Node in diagnostic tree ++ """ ++ def __init__(self, name): ++ """ ++ Init DiagNode object ++ ++ Args: ++ name (str): node's name ++ """ ++ self.name = name ++ self.diag_condition = "" ++ self.diag_description = "" ++ self.diag_advice = "" ++ self.check_item = "" ++ self.is_leaf = self._is_leaf() ++ # children ++ self.children = {} ++ self.dict = {} ++ ++ def _is_leaf(self): ++ """ ++ The node is a leaf or not ++ ++ Returns: ++ bool ++ """ ++ if '|' in self.name: ++ return True ++ return False ++ ++ def children_all_leaves(self): ++ """ ++ If the node's children all leaves ++ ++ Returns: ++ bool ++ """ ++ for child_node in self.children.values(): ++ if not child_node.is_leaf: ++ return False ++ return True ++ ++ def children_all_trees(self): ++ """ ++ If the node's children are all subtrees ++ ++ Returns: ++ bool ++ """ ++ for child_node in self.children.values(): ++ if child_node.is_leaf: ++ return False ++ return True ++ ++ def to_dict(self): ++ """ ++ Transfer Diag node to dict ++ ++ Returns: ++ dict ++ Raises: ++ PaintingRuleError ++ """ ++ if self.is_leaf: ++ self.dict["node name"] = self.name ++ self.dict["value"] = None ++ self.dict["check item"] = self.check_item ++ self.dict["msg"] = "" ++ return self.dict ++ ++ self.dict["node name"] = self.name ++ self.dict["value"] = None ++ ++ # create children's dict first for function children_all_leaves/trees ++ children = [] ++ for child_node in self.children.values(): ++ children.append(child_node.to_dict()) ++ ++ # If diag condition is not defined in the graph ++ if not self.diag_condition: ++ # Default relationship between leaves is "and" ++ if self.children_all_leaves(): ++ self.diag_condition = " && ".join(self.children.keys()) ++ # Default relationship between middle nodes is "or" ++ elif self.children_all_trees(): ++ self.diag_condition = " || ".join(self.children.keys()) ++ else: ++ raise PaintingRuleError("Node which have both leaves and subtrees as children, " ++ "must define diag condition in Mind Master") ++ ++ self.dict["condition"] = self.diag_condition ++ self.dict["description"] = self.diag_description ++ self.dict["advice"] = self.diag_advice ++ self.dict["children"] = children ++ ++ return self.dict ++ ++ ++class DiagTree: ++ """ ++ Diag tree sheet ++ """ ++ def __init__(self, diag_table): ++ """ ++ Init DiagTree object ++ ++ Args: ++ diag_table (xlrd.sheet.Sheet): diag sheet of the excel exported from MindMaster ++ """ ++ self.table = diag_table ++ self.name = self.get_tree_name() ++ self.root = DiagNode(self.name) ++ # Check items' name dictionary ++ self.eng2cn_dict = {} ++ self.cn2eng_dict 
= {} ++ self.__generate_tree() ++ ++ def get_tree_name(self): ++ """ ++ Get tree's name. ++ ++ Returns: ++ str ++ """ ++ return self.table.cell_value(0, 0) ++ ++ def check_one2one(self, eng_name, cn_name): ++ """ ++ Check abnormal items' English name match only one Chinese name, vice versa. ++ Args: ++ eng_name (str): Check abnormal items' English name ++ cn_name (str): Check abnormal items' Chinese name ++ ++ Raises: PaintingRuleError ++ ++ """ ++ if eng_name in self.eng2cn_dict and self.eng2cn_dict[eng_name] != cn_name: ++ raise PaintingRuleError("Check item's English name '%s' mapped to two Chinese " ++ "name: '%s', '%s'" % ++ (eng_name, self.eng2cn_dict[eng_name], cn_name)) ++ if cn_name in self.cn2eng_dict and self.cn2eng_dict[cn_name] != eng_name: ++ raise PaintingRuleError("Check item's Chinese name '%s' mapped to two English " ++ "name: '%s', '%s'" % ++ (cn_name, self.cn2eng_dict[cn_name], eng_name)) ++ ++ def create_node(self, name, parent_node): ++ """ ++ Create a new node ++ Args: ++ name (str): node's name ++ parent_node (DiagNode): node's parent node ++ ++ Returns: ++ DiagNode ++ """ ++ new_node = DiagNode(name) ++ ++ # if the new_node is a subtree ++ if new_node.is_leaf: ++ name_list = name.split('|') ++ cn_name = name_list[0].strip() ++ eng_name = name_list[1].strip() ++ ++ # Check if check item (in English) mapped with only one Chinese name, vice versa. ++ self.check_one2one(eng_name, cn_name) ++ self.eng2cn_dict[eng_name] = cn_name ++ self.cn2eng_dict[cn_name] = eng_name ++ ++ new_node.name = cn_name ++ new_node.check_item = eng_name ++ ++ parent_node.children[cn_name] = new_node ++ else: ++ parent_node.children[name] = new_node ++ ++ return new_node ++ ++ def __generate_tree(self): ++ """ ++ create diagnose tree by combining nodes ++ """ ++ row_num = self.table.nrows ++ # 7 columns of the excel are useless ++ col_num = self.table.ncols - 7 ++ ++ parent_node = self.root ++ parent_stack = [self.root] ++ current_deep = 0 ++ ++ for row in range(2, row_num): ++ for col in range(0, col_num): ++ if not self.table.cell_value(row, col): ++ continue ++ value = self.table.cell_value(row, col).strip() ++ # find parent node and switch column ++ if current_deep == col and parent_stack: ++ parent_node = parent_stack.pop() ++ elif current_deep < col: ++ current_deep = col ++ else: ++ for _ in range(current_deep - col + 1): ++ parent_node = parent_stack.pop() ++ current_deep = col ++ ++ if value.startswith(CONDITION_PRE): ++ parent_node.diag_condition = value[len(CONDITION_PRE):].strip() ++ parent_stack.append(parent_node) ++ elif value.startswith(DESCRIPTION_PRE): ++ parent_node.diag_description = value[len(DESCRIPTION_PRE):].strip() ++ parent_stack.append(parent_node) ++ elif value.startswith(ADVICE_PRE): ++ parent_node.diag_advice = value[len(ADVICE_PRE):].strip() ++ parent_stack.append(parent_node) ++ else: ++ new_node = self.create_node(value, parent_node) ++ parent_stack.append(parent_node) ++ parent_node = new_node ++ break ++ ++ def to_dict(self, node=None): ++ """ ++ Transfer diag tree to dict ++ Args: ++ node (DiagNode): node of the diagnose tree ++ Returns: ++ dict: dict of the tree/subtree expanded from specified node ++ """ ++ if not node: ++ return self.root.to_dict() ++ return node.to_dict() ++ ++ ++def generate_tree_dict(excel_path): ++ """ ++ Entrance of diagnostic tree's excel to dict ++ Args: ++ excel_path (str): excel file's path ++ ++ Returns ++ dict ++ """ ++ diag_book = DiagBook(excel_path) ++ diag_tree = DiagTree(diag_book.diag_table) ++ ++ return 
diag_tree.to_dict() +diff --git a/aops-utils/aops_utils/kafka/producer.py b/aops-utils/aops_utils/kafka/producer.py +index c0b2c66..d37a7c3 100644 +--- a/aops-utils/aops_utils/kafka/producer.py ++++ b/aops-utils/aops_utils/kafka/producer.py +@@ -86,8 +86,8 @@ class BaseProducer: + Args: + record_metadata (record_metadata): message's topic, partition and offset + """ +- LOGGER.info("Sent successfully. Topic: %s, Partition: %s, Offset: %s", +- record_metadata.topic, record_metadata.partition, record_metadata.offset) ++ LOGGER.debug("Sent successfully. Topic: %s, Partition: %s, Offset: %s", ++ record_metadata.topic, record_metadata.partition, record_metadata.offset) + + @staticmethod + def _send_failed(excp): +@@ -103,7 +103,7 @@ class BaseProducer: + send one message into broker + Args: + topic (str): topic of the message +- value (str): value of the message ++ value (dict): value of the message + key (str): messages with same key will be sent to same partition + partition (str): random if not specified + +diff --git a/aops-utils/aops_utils/restful/status.py b/aops-utils/aops_utils/restful/status.py +index 2400a22..2a3e928 100644 +--- a/aops-utils/aops_utils/restful/status.py ++++ b/aops-utils/aops_utils/restful/status.py +@@ -36,6 +36,7 @@ KEY_ERROR = 1203 + CHANGE_PASSWORD = 1204 + REPEAT_PASSWORD = 1205 + CHANGE_PASSWORD_FAIL = 1206 ++TASK_EXECUTION_FAIL = 1301 + + + class StatusCode: # pylint: disable=R0903 +@@ -102,6 +103,9 @@ class StatusCode: # pylint: disable=R0903 + }, + PARAM_ERROR: { + "msg": "request parameter error" ++ }, ++ TASK_EXECUTION_FAIL: { ++ "msg": "Task execution failed." + } + } + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/__init__.py b/aops-utils/aops_utils/tests/test_excel2dict/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/__init__.py b/aops-utils/aops_utils/tests/test_excel2dict/data/__init__.py +new file mode 100644 +index 0000000..3e26da5 +--- /dev/null ++++ b/aops-utils/aops_utils/tests/test_excel2dict/data/__init__.py +@@ -0,0 +1,12 @@ ++#!/usr/bin/python3 ++# ****************************************************************************** ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# licensed under the Mulan PSL v2. ++# You can use this software according to the terms and conditions of the Mulan PSL v2. ++# You may obtain a copy of Mulan PSL v2 at: ++# http://license.coscl.org.cn/MulanPSL2 ++# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++# PURPOSE. ++# See the Mulan PSL v2 for more details. ++# ******************************************************************************/ +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/__init__.py b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/__init__.py +new file mode 100644 +index 0000000..3e26da5 +--- /dev/null ++++ b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/__init__.py +@@ -0,0 +1,12 @@ ++#!/usr/bin/python3 ++# ****************************************************************************** ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# licensed under the Mulan PSL v2. ++# You can use this software according to the terms and conditions of the Mulan PSL v2. 
++# You may obtain a copy of Mulan PSL v2 at: ++# http://license.coscl.org.cn/MulanPSL2 ++# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++# PURPOSE. ++# See the Mulan PSL v2 for more details. ++# ******************************************************************************/ +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.json b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.json +new file mode 100644 +index 0000000..f177767 +--- /dev/null ++++ b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.json +@@ -0,0 +1 @@ ++{"node name": "重启类故障树(简)", "value": null, "condition": "硬件问题", "description": "", "advice": "", "children": [{"node name": "硬件问题", "value": null, "condition": "硬件问题1", "description": "", "advice": "", "children": [{"node name": "硬件问题1", "value": null, "condition": "检测项1 && 检测项2 && 检测项3 && 检测项4", "description": "", "advice": "", "children": [{"node name": "检测项1", "value": null, "check item": "check_item1", "msg": ""}, {"node name": "检测项2", "value": null, "check item": "check_item2", "msg": ""}, {"node name": "检测项3", "value": null, "check item": "check_item3", "msg": ""}, {"node name": "检测项4", "value": null, "check item": "check_item4", "msg": ""}]}]}]} +\ No newline at end of file +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree1.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..e0b1a39ff6fb253811053b8a1a710622937c2f9f +GIT binary patch +literal 9279 +zcmeHN1y@|jwrB*gG`Rps@bmv0|HUg%ku)mT%YrU;^X}ozHlx(n+Sl-$M}dQw +zbc#YcMdm5`$XhPqVsh`5>i +z2M;Mb@dy%~v@}n0v58)@66ov4m}LM`9UFUESq!d(j&j)gv> +zcJ}K4(!l8jfP!^YJ#HnN-K0;SFiwHY=xGnh%01-<){DM1B9oTvxOfved@@^x^}@mI +zxAMH)bb-FbL&HrxUGhi9FTCxFrr@@r#crRmuQB#oF1kd~hr38Ae8P^_hxYir4nOZ` +z2OXcK?D?@h!2$r!&rkq`zrga7Dhv4~*w$pgs6zt7Qs2qc#+jMv=kdSSSgcOj*tv!yeys0El +zdU%aoeCF)!NAyW1bwUKJ!WW^qf@uL-1G1W{Mz>WEUqqFUDnn};xpVhHlNo;VDMfoI +z!r{C!htp{|L(WF#i&fr3Hl$ZS2vwCWcr9y;vYq(IfchpjT^DaNI+%w#8Fcw{y$qrQy(BOyb +zX_u);*?lk9i0E2$HRhz5q3lC}C*y-|H=L-OP*_*p^d*s)(4JT}C;Tw>;}_#l@J2_K +zuL31turLqz56X{_P}7~UnuQXF7_zZ>zTMJL5`+~Lk-fcao(|=(nqYBdvvU6Yas+HM +zqxIYVcfrbsm4Wq*jU?%`W;@T#Zi*o5#Wt@WEp)Mq0LlVS(w296;`Dv}HCmSxJ_an5O%jXQEmnUZ1!-HSe +zz&rhSlZiLWX(xaJ03N_z7Y+Ob*kt}}EETHDb_FcxKKho=5I?dAv6v8~loF&rCAXB9 +zERP!R;8Qojj4c&Os$8wP%Sd^8?KKcla& +z5)^%pM{P&UyY!i^Af)0J{-{ZOaO-3(l&JJLMQN26pstv5%{dB`yh^A5!XzZC +zP;xqy&uPp@&dBU$X4H0d?20vg$QShre(MAMrb;dFEX_oAn7$59u`L(&`+?4E%NzPd +ziFK*6iso$<7&h0hyt4Y@4;+b>nvsY7cq~gs(J8zk@3Bq0}ys#oL$*Fly8elA#lCB~+KDu9&r!q;Q64OSmp%@UvqSkQ<$P^#v7V1ufq~C~l&(LG)||W#Nqs +z-$rtUGy}-pPit5rVSO#s16ikg98?v1i)-1|d#QExuXLgW$0>Qf^6vT(=J9ag-Xhia +zywenY?Rl=#u)5J7yQ7kfL}|~ +z@3v7(ji(@V6HmP8r1cwdg-qHwS}639*kSb=6;gf4hq{9v8n`NXb905-waZtC@LUiR +zwthKFh*x%WMVL+(ELmZO%vl)R+0cD4?>@W3ZAc!5?omXWKPG(;IfXY;Nn(*C0to|q +zMdK7ETYlJnr(SWx*rbdZ%C;2nkSCs>un%Rrwt!hyK9xF+@pVgRcWs!@AK5Natv{#= +zg1@v~>iRS!gSvb}c5-?Mhj;K|Au+%lPqbJ=jr@{DR(^9RPQ;xVz)^S-(epEPOgKpPvnBqgweKs*a#umnjU75{UF{o +zT}G3Q4-YA?q`^hZk1J@E*`deR#c+3{pJ&_sdQbV)c}vEwA7c^PHaL-q5hpUq +z|5BC2WW11ml%0Gl34fbyJt1*@nh&3E4+V{PfaGKQbQ8y087h2sUwBq-m}LL@iS%h! 
+z39ngBGfWf#7E-_f20dO>0mC;@rOk4c{C4-~M3u-G>>^+2N!A|c&!cW=Yq7^mHQCmlh>GOr+OV4L^Q!`F10raL_X4$upELPgyH_D0+KP4oUh%Y}OpL +zh~VzRZ`x$QQ@ATw9}<_Gg^SCfo>`)P-?c+SfGY1O$?+F$e*PLdl|e@hNTC +zQu=oaQb>#(^=OaGD~X>+^K5&IGDJYqqVQ8>}5 +zL6spA1DV`0VB7(6O%^{cAy`6)F_xDPww;wxO{!3fRh;6i9lAgFddWmoeBu6airLTm +z>a-C}C{)TL82OCEMMQO5q#fiwab%<$ +zQ7QV)Syx5fiN6F>YP^Ulbvjq8udL~P +z2>r#$H7u>7D(m$2U<*=(RZe&Nhm&b#|`N|>~dHg_5%g4brT!Aa8} +zW_?>Tb9ian*4!Bg|fS3~5NQ+~vF$W^yDC*MJhF`R)cJ +zdq)ETywmB22GaxIzTHF~vkuIDT!%Q;CBv_K{`6K%y}i9aHdpzHU8-YPQHiSNR+^8H +zUvnn?^&+HAxqDL5=4quU(l?={<$VTGS=}m*eMA{Lj}tJ=17$zl2%YhT^XRfsz^Re# +zi1Jf}HZkbHik2|09j=`7MYpJ}^h$7HHo)YgZiS_4*|i0K!|Z2DeuJ$%v(fraoQeDf +z&)a14PE_AQ&wv@E7qcM*b00QLVFN&H=mePh&vFEzf&T#!oVO9j{G6?m(AG +z>eYqwG<2z(oxn}qyjCH|Rf_>A{MjR#6_Cy+fH{Wu7@-WV-AF>p84u_LgWyQQ0=6=* +z*3LjqtjLqDjR5O2>vV5RL8ScNoKG>ZZE(P{xK2k+cYHPka@e~KyJ{d$no`-NGTZ$` +zZQoVcrL46=8Y<~93g1x1j7ZTHUr`@b6g%52SZ^^+W6DKiN@4$fo|d@d4kC*!4vxpp +z^}1O5PMFA&NT}J1q+Mg2;`+3<=qkjCG;vQ5dubYkf)O)2Nt}!FVv8ft%GI(=P~rn4 +zd9+Ji49hf2Al?Q8|9Hy1=0HnY*esiTvr#jV6Z?M5JchLxS#eVU6_xVmK7P9&pRC~D +z)01;p|G@f=-#h35+e|*IjhC<7K- +z8HHo&MZPV)x@*C&rgwOz+M+U=m0zgSt+ZUoj{CGU5zt`OUnjpKgEF1+;g-zl~CU{${q8 +zuF&pLj?t;gnyDET^9xW)U~ASV7OIN+?_wa>vu!cHuUY*AZd} +z0iETvo>r6=P+O8~v}rhdd`U}S3q{Fo)RB%@DpKwZn`aMHj=Q_b3YuONHC{tC_2fn! +z9_z(budEG@YNsQS$(^&uw%W04M4y!!c@VEtK%+}`Q?Td06S>z@ +z8|WHQz8ms}IR4^h3#N4bq5f##4>TQJ)67@?#ZKRDl=M=m<9ovb;2pNO#T%~JG+}tXBjE%y9p(sQVF^10}`kh%?u?an3s=lzA+`tSTG=teIP;LOMq>c +z?0llLyNpPw{FN!)jZ%k1nZp1Z(E~~H&fOf@yT~H +zY({5TJmn77K~OV=gF0tg7f6q`OO8p>iA)KQQjKi@{D~AKu0>K?xd}QlNaNWWWIFJbiR~dtXibd^@usHAc(tI(NJPQ4U+S +zLOTA<3{>grm!v$@#_Aa=Hb@zBV>TahKehPM%%%FfdAqJZGNAl9rowd|U^%j23fUb$ +zkjo*PG-R-Gd4g6lszBtOx_+y+8hq63*w-AenFeYiZM|2})39`CqyPK2^k^m2z^3 +z5#nL1D;bzs31p^b2a7kzj@elDa=r3|o409{VMwrcgIWtX=3H*=dJ?P} +zjc$V^tLAfV=Xhq5%**l@iNy9(~YAqH(*C3S?~yRJxy+(_A5oF;r0r +z#Dcg8qmgxHpK#JhwA-$yv~b$5z7TSJ(LVHXCS}s#w6F^@{39B}+X;O91vqcGc}{-Q +zcm#IPJx(TpI?-~&)}7l>IIAp{OMGMp90`bQ#_szlt(Q;meY#Li?bOt69~468*ck{I +ztmaq6XFlm_;EFhs$uT76m1%mIs&5+jba%ROvgVuSu}DliqvF!T(R=F+$r9ye!?hNz +zV{7-O0cBE;L>-3qEMYI~Vh}p!WGyl96|YIv+SN;+q$N=b3vWO3C(Q1iTarrO`7WKX +zVrLZ&5#b}QwTgyT2AB6wW_uIn6i(_>W(VRIt>bPPl@Fgg?}1L#_h{T~ULnCPFc+@9 +z>#Id~243ek1&ksfF^RROB>(HAdpe!lM#c5k#WA$6KOmV`c8qYFHL-)2`@QR;YAt#M +zJvYY3Zvt~$p$N~Xw3OxWao$vhBFa`OP-qBY1aqtO5S{Gq34;;)cTS;8j$LydOhH|P +z(`@|TG5wQMaJDctb#Z3?ZTcbCbrH9ph(kcdH>G<1oc`N+5H`(ys; +zQ=H`1Y&AH!+~|9MIWz{y`tseFHe_bNdV6eum?MMJtVq|%(V!Lo5v50yFPm9C=#nc7 +zRju2EOJfzLK%@zE&XNaZ&d}isjdCisJ6KjV`-5h*U2p%^iyx9EHtfJd#N6k2!!|2=qnDr8dZXToj#e|}I8l!rGH*WiJC`NPWJ@!Uzx;a~wM=4Kq +z?Tl(1IQ{kwGrZ6t1$6}kLfCg1Jc0@hlbpGARJ%BHJP{vwfGn2p6jc{8GaMjBD9B*j +zIUS;NZ%9=AUx7%@c=&12>A$)#ahZePDeb-bGW1^bMg@2 +zBzLeh3AA=5O=Evj>K^S8qq{vovI)jnonh8ECQ#QCVTZ>;&J)mFqjswlGgbV4rDnl6 +za-oiTLLcHqf!}k=PI~#HSI25RD(eAk_kTwP&ZvUNTQEmp4MqkAn4C4SH&$@6cW`Dl +zws$i9S6lIaoGjQ~L?tjRv0eV7#0HXEbp!sPUFiilv}UroP`#jvzok{jr3 +z>`2Xm69Gd>t#A(K?xw5ChwP^An9~Lf-o;w#}Kd8@TOeD@JtZ*+;9 +z?v#R$mgyyFSFGN?k!xyklRjT+R932Zas_Y%HegfBWjhv+ssnG@&tIv2@W^-3u1IyI +z?>x0d*sOZA72@`A(l_Y}g7>aBp>a~uGrvJQpd2yAoQWTMBQ?Lx)l7wuX4`V(H|DMw +zh6I#3^UDt#z}iHJFTOqW+SdO^hpewB;E)40J$!JYh5l#LGjwqHpXGs<>|Y}@vD@k^ +z3wr2DNJr3=AiceosD?>Mt>tiD9#ZSD4_(}Regnmah{dD2%jq|y_Mes4iUI`02e}qK +zGO|50()@0(_TJC-SJ~F08rQDk7zvGvg-+Z~x7d=}aS_W+h^4^mAjG(9gbPwR=IM7T +zm4Am4uT5oZa41FRRMdEvCKJQ>fnYQ?d1Mebe7PyPqpYG^+WuI}g+l(~L1r`dbFxNu +za8MrpOT3ql)s}m*lke@g@4LFR+L7`;!iL+IR#!#{{fMl!W|>QncfIyCuTgYrDN1#Y +z`NpKRbf7bASw06#sB@0d5=fPoI0@w%@_Dckk_e>sl97*N+c@P=9raYjB!|=xO)VTX +zA4URsp*m(5O${6 +z3MSzRLLdhc4VnBnTv=k|IApKVbe{CxNTkz-n9*mSfwU~bu{A;7_&w_2>KFa^!cKcP&uickF 
+zG@XD?qx{l~`BnH=()kZzPUPQ&|3*Ros^M2I^bZZ|;93m)^S?5qzbg2(diz6xJK{4)0L>Qwz~4gbui}4ojDHrNef1~tf4NBo +VX&CVC0sx5Mk3Tqymy-Vc_J4lKD@OnT + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.json b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.json +new file mode 100644 +index 0000000..190135f +--- /dev/null ++++ b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.json +@@ -0,0 +1 @@ ++{"node name": "重启类故障树(简)", "value": null, "condition": "硬件问题", "description": "", "advice": "", "children": [{"node name": "硬件问题", "value": null, "condition": "硬件问题1", "description": "", "advice": "", "children": [{"node name": "硬件问题1", "value": null, "condition": "检测项1 || 硬件子问题1", "description": "", "advice": "", "children": [{"node name": "检测项1", "value": null, "check item": "check_item1", "msg": ""}, {"node name": "硬件子问题1", "value": null, "condition": "检测项2 && 检测项3", "description": "", "advice": "", "children": [{"node name": "检测项2", "value": null, "check item": "check_item2", "msg": ""}, {"node name": "检测项3", "value": null, "check item": "check_item3", "msg": ""}]}]}]}]} +\ No newline at end of file +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree2.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..f76c6ea8df452959267476f65270a8b71e3085b5 +GIT binary patch +literal 9336 +zcmeHN1zTI$)(#G(xJ!ZH7Tn#TP^3^?i#vrN#UZ5xcPJF6MT@jRp}4!dLxBRtT}qHI +zow@hUOlQ7daPQ9ZoSi&5YbEFG_g#CfcO4BCBxFJWDgYe-0005z2bs1e2mnAFG5|mT +zKu0u`b#ZpLa&|X;=KaD7Y|Qb}$&n@x8Id^$fCxYTf8)RS4wNPhsC97R%3mto$bJJU +zFIGyS^Y24?@L8Wqw00(Tmsl8P+1cG^g~M=Va*6B(KjDuo`tlqP*;YC`)j-0!8#EtA +z2KTmVn@|XHcJ=Hrw2+V`xEbgl<`7ayag&*vMq8%?QeA61H2LIKrIky`ZHb5_g4Q$V +zdvx$ty6Ya67!(QjF0_4B(^Mx$aT^}Quw6|mNH91`eJw}eC^<5S?i< +zYu344zfoxhGk@WoGP6Kj<_e8#!d@C)L2YOjq*CQ0uPlx@D>3Mfb6{)nGxGCkVUCGB +z)3~k|AhIcZ10mzH01b!8YT_~;=Mn!-Nuqa829DZkmS7C41eBdC6qfZ_v2m7a +zq_j@#U-Nsi_eM7iUmYB$YzOk(p#lK+_ecPZztHltHW%F)9BWVDtiy!U($vk$5zNW)^Y}kH +z{ugubPnTYrpsL=%^)U2M=?2<+IkgmrE2H8it<(T~77+A!5$ALCJ9>(x7A8vEXXLMt +z6a$+AZu%FN#G-e4ffs9hrIGlAqI5N0Wub4bT|LlP8QoG8TuWCw@jR!_r!LYImA#og +zn_^i%d@0IR>06;wm^haEgge5aOO8R1PZ>%qo)&D-t*XClepQYzC8M=p7W%PPFlQ%m +zBt38@rC=LNGEDf%-gp{OFWB5>uH3KJ5qSQCTwBXl*zTiwmYWEzx2dIL>#1CN3&E`) +zNG+pRla>$vif2Tzhau-Y(4d;{cp%feQxK!$L+?=^B$B#l8NNgPog~2$NpDW!ccdOp +z5^?}K!b?Za-+AKY;^tuP;^Odg_xd+y5a3%F+{=IWDAiO|>Eyy~!n}iWd8K<2;mx^o +z((Y;R;-L3cvn((Ic>~T?C|K)_^`=!gkz7K(4~Kd@FZr?8kOLe0+*|P|RHm{f&$e`~EXywl8#zXn+hq*j> +z?7^dt`r()vsQwmo9ip{Y2C1&CrFqL@y>VYByKsS;4e_QPJ0-(-EQaL`82dg6WV>pU +zzd%1}ys=jG9(HJZcl~gK@`DOh3BR~ss5=d`SCKl9B>}vVm)4+txauwg>t|_MIAI?+ +zJGB1R*E6*OU+KR?CWa7bPmTlt+`ywQ4*U!_Wd016QtdJ4bS^^w?CLxyvSS!ABfLnWbK!XH;~LLX{+N3 +zW5hApm%Hh^Fn)(k)k(@^eJUN+FSh5F_I*A&ceW0lC>!@;%RdkU6b&I0O$%Ujhmz!( +zZ_4rKh4vb3N9ccaC5EHYQfts1V+ +zXKjW$9fh)e45@XbZQbj~K^WdCkfmM9Ml6oCpwA)| +z^l^=Wa*?r%GJN931DZzErMsUBezddd7UO#}@Os9pu$^F#x0lUF*xrVY2)){ +z!_3%8-Q6+wB2Jz@%C-mfUb;}!Ay`My*JFmd&XPiCe2HZ-pTt|lRrGsghWp0ueW!k3 +zF2&*`h%%mL&p>`}FaOhoR%%-qes4Up60}X|lzxb|l +zo&(kRo7mIJUFiEj6ycOnz=bkz02*g`g?T17l8y*EV%FErEdGOg{#&enq7iLczh^Q$ +znu^1#2EyNI1h#$dW@V}6?q=s~1O6F!OSKx5k+})?QSO27XZFfzS$QpT(IJqlB8EXD +zF9>g6>LMOV%||>UylPx|cE4(gj~ckQluK5ufo-L964qs&An@=|-)aVhjiY!>FS#vU +zlDl19iyVfMDT<7aas3yrIl(94(i})sEEpVR!MPSDF0mF5@qKm4--L$sFK`QLDU8H)8jVw&3{oOZ}rYAcd31~+n&1~=Q_C;4(u>J4(V4yke`X4rA +zdyWpNw|C2;{}>JPKHlS)paB3wG(U^K-=d+rt(B7%=Wj>upR2g1Zx~4-NEpC*Ci(F6 
+z;7VY90N>@9f5B1^L!T)>*1+(B`2!i*0lT#G33xG|&`!cR&zN`PX7MNpf#K|0F$2HXnt$4$sxIT!PmG77V>+cC~Szw@J79EniLKwn|S>~^_u +zLVGj0AI)zO3?byA66|atdSM(J%XoPp>H>DDs;@pAFpFBu`PS4IhvaHHv8nwDFIj!%#X2WM)N=F>~rWgK%S5;xwfZ2=PJ_W*${qE`8?U!J_1AKTY*vJm{T_z +zKtn2m{;^nH>M91gG>;b^16sz*21|13wIPjmOJ7&-EapCp+-|ThAf2CZT452cRatzC +zRpYLnWh~+j#77tFNO$J=9q4!}i1IlLx%HfMm@hqoi!f)}YA{_~&B#cO#&2q@b^|)M +z5`?Uq@w!shjv3ols@zeyAi5P5ol#^ +z=|m)|UT3L8Q2@UNB@cTYnrh(O7_y1q#gQyas5PBp0siy+J&CMJ +ztcyL68r}#Z1NT}wpXuu&!(zXw3!L?w(x@#TAL7EmFzc`)e(5YOS!9&ZNN4HJ2Z0=U!`dJLuYT1% +z)pRJr1G*2(;g3%S*S4O`5!{Jd#Wn;9zk>9XU1=+4swP!}OIqhbU^prK;(Y4m7DA7w +zg?AfEge&ZC?)|l^>^QS$c(y(ddV|(|U_PMFPfVJntP;Jv-A9;~XHU{_l`l6SYsR?^ +z5{S#T-AJU9`^-y;MxOyTcz%QAPl%oS<5P}>5)bjn_7!bW%W^B7&D7)zd{`UF2u8Wb +zJL=y>sBcgE7kW4LblVkH8R-}gO}<9#^J^q5@6J_W&~h)!@;cPn@sM;cX(&}eO4^Gg +zeg4Fn;;9GeQ&Vgi!Yu0$lesj#F?YQ&jh#2S1`4hl54fC&h+b}7e3ft7fJ)Cxhgwr6 +zZCMZ%R;M*al|L9xQ}9XTElxwwFz4i@iFMbSt9SLb_pmDwS9}kmi*m1u<{IaMkgT$c +z4yC~KyX(`SlRWBm=5-WqygSh|cnu;slT;96kx{LO{Ib9RZQ2xcvF=Q|CvNYGe#823B0E{hn(ut8268KR;HbmK +zpYm1~Tc7lgL2BayWe@VG>Nl!&L{wEhU8v?|M}Me3DRAZFp&%rFT*)Nan>lRYA;rF{ +zoOyMmjaC~h=JO&@s8I8}Mtdf|qQ+UiU>u86D{&oN6*-=SH(1T!Zb@?%yFU4& +zAv1q_0I&!(Uxv<67jvJhGzIp&ZgTh8L0cQ`yDyXlwHHWV+H)fJ4oqSzmR5QO4BukX +z5tF1fXLH(joZtG_t<|?sm!x~I&`F7SIV4M0&F@V<7_#@(K=;X1$ek=s^002+BeikQScW+0l-_of;ouP9z|6YRW0^4nQAM>;#Q +zSdiYVlo?~3gQ0z&>a`*~!)*lC8lBJ!Z!~{PD8xwbQSwl!wDj~^OT$e~oXkI{XfC-f +znLJKU;oXWa3RRBJ`P!|BjaSEBRD^$aE4#@NKVi%MVDLQ+hDbbW)8m#qR_C+uco-6J +z9szxf{(Fu`(?uhrByLPPg+2j-sdBN<^v7q(Gm{Zt^SGfxr!p(1FDbQxMMC&TDiBun +zZeNxz8J&;GQD2Db +zr|>alj~jcl;cU_2)3iKf2@{iS3~SQnlzZzNA*QJ%k<^_}SnulQO(-H=wH((yBi`a~Kn0Y+t +z&*1Bo_sy`{OEtBE++8279j&ew6}hsYJ_=_s%gW#r^6TZ1)XLZgeq +zavZ-6J?%k`K3qwFPzhU>ube;fPnwsZxAhCOxx;U3zoID%m}xbF7J{GgNlWevtd!O? +zu)Dvxv)&%Ip?A|Av))xWZ5Z-QFS{9SfqACwAxZlXw(~K%|dAESlwZtNE6e +z>0RVLkR)MGvj|3}j_#4g;HPl($h1A_C6i`%F`;s&5BjikGPw4pPSgvdEVKT+WdK#( +zs84N?Z`2@KA_YHd+Jw$Q;4WvbRDgGYfkd$xXVesDr2Y7ul{}<&)g9M6an8(RD;o9X +zewnP;`SfhVH8kZeOBSlC9b@Sl`o`a^?MY<=rL}Rv_D6^oO3~zJ)2#@QWVB+$v+6Uy +zOJw~~-R=_esAvZkj0LGl)PwofdaT9TU{#9Qnqa8Fksd;#+igy7y(V)(g(lLQ0S(;< +z!(>mgcZ_su4hdGMv%4KL#6yk4*!LODx9izBA1)^Co!j`iHilE1m% +z1{8t+T3|n9nzo^l;YAuRso^s2g(`bUb7Lx*!M(WN9lY%PJ6qB=C)JeTx`G3oF?euA +z*V4s8!_DOdnA5_=&FWvh$o~ns@GugU@Kn8nhVb=@>J1LoNX@ABkitC2(<-JnsY34# +zqFaq;^q3K>YXdo*Y7H`RB-({9*E1-_f0*dBF_9Whv&y>4X#gT-@}9|un&?Enh(O#; +z#V;)(viuGp9y%R}OmgUVKvFN5HZ#HyIHI>=nIW4ODP-wRagpYs@P?kO6$m5<2lAmz +zM+JUFJPL_3WN+hcqwD~2O(NkviihARHSubr;Zn+?;3riH#hBmhdzihy1?OlfsOi_g +zy2y-i?Qv}Lb$xN@ButuK5dQXkcu2RG6*DlJqUeJ|I@GB-Sc^|Q`R>gPz@rfsI7bQ!HnX}^_QzqDv_OS81 +zn+Zv* +z;rIN-i~pey4!eJijD$A(MK0XX!`IEP#>ClN3}o~yUsu}o<>q2G^!c;K&g9k5zYm|= +zuR0r-1-guCtrP@{Df9@;y-d&YNly#BI^TXX*;VdTiEUB2Ok^%GARjt>HD2!obQYji +z8ww(fsfoNue^8r{+a5F8@2J^c{eRdzFQOS)?@aiFqxd|gk9fo>4=zkmjoZrBi@~7VPuZDl^ +z(EZu45#E6L|NC{n`uVkO^M|KHcwY5O6X#dsU&ZP_j0GP4US$0$UH|IgSDo|^4?pE` +z_|N~Un*QqG*UIh>2T?@7|Aqh5dB2+enn3Zwri + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.json b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.json +new file mode 100644 +index 0000000..6cf714b +--- /dev/null ++++ b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.json +@@ -0,0 +1 @@ ++{"node name": "重启类故障树(简)", "value": null, "condition": "硬件问题 || 软件问题 || 内核问题", "description": "", "advice": "", "children": [{"node name": "硬件问题", "value": null, "condition": "硬件问题1 && 硬件问题2", "description": "出现硬件问题", "advice": "ccc ddd", "children": [{"node name": "硬件问题1", "value": null, "condition": 
"检查项1 || 硬件子问题1", "description": "", "advice": "", "children": [{"node name": "检查项1", "value": null, "check item": "check_item1", "msg": ""}, {"node name": "硬件子问题1", "value": null, "condition": "检查项2 && 检查项3", "description": "", "advice": "", "children": [{"node name": "检查项2", "value": null, "check item": "check_item2", "msg": ""}, {"node name": "检查项3", "value": null, "check item": "check_item3", "msg": ""}]}]}, {"node name": "硬件问题2", "value": null, "condition": "硬件子问题2 || 硬件子问题4", "description": "", "advice": "", "children": [{"node name": "硬件子问题2", "value": null, "condition": "硬件子问题3", "description": "", "advice": "", "children": [{"node name": "硬件子问题3", "value": null, "condition": "检查项3 && 检查项4", "description": "", "advice": "", "children": [{"node name": "检查项3", "value": null, "check item": "check_item3", "msg": ""}, {"node name": "检查项4", "value": null, "check item": "check_item4", "msg": ""}]}]}, {"node name": "硬件子问题4", "value": null, "condition": "检查项5 || 检查项6", "description": "aaa bbb", "advice": "", "children": [{"node name": "检查项5", "value": null, "check item": "check_item5", "msg": ""}, {"node name": "检查项6", "value": null, "check item": "check_item6", "msg": ""}]}]}]}, {"node name": "软件问题", "value": null, "condition": "软件问题1", "description": "", "advice": "", "children": [{"node name": "软件问题1", "value": null, "condition": "软件子问题1", "description": "", "advice": "", "children": [{"node name": "软件子问题1", "value": null, "condition": "软件子问题2", "description": "", "advice": "", "children": [{"node name": "软件子问题2", "value": null, "condition": "检查项7", "description": "", "advice": "", "children": [{"node name": "检查项7", "value": null, "check item": "check_item7", "msg": ""}]}]}]}]}, {"node name": "内核问题", "value": null, "condition": "检查项8 && 检查项9", "description": "", "advice": "", "children": [{"node name": "检查项8", "value": null, "check item": "check_item8", "msg": ""}, {"node name": "检查项9", "value": null, "check item": "check_item9", "msg": ""}]}]} +\ No newline at end of file +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/right_tree3.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..3deec77093be3720b3d8df9ef12baa0052612a2b +GIT binary patch +literal 9744 +zcmeHt1y^0k()Pg)?hXkKfxyAt-Q9x|+=IKjySo!SXmI!74grFN;4VSGGjs2KXC^b> +zFSxh&TD#9)y{p#Q-Op21UDb**P|#QaAOH>k0FVOSpJbcsK>z@8&;S4?01i@H#Lm{q +z#MVh))%}Bsqb|LhjWtODG$eH%022KE|Jr}?3{)nK$@ej$iQP&*itK=-zt#!Du^k5t +zVZ2r1@9s|=EH~25v9Nf_34cHn&d0Xo_=GX})sy*r+`P`#rYRt7uuT~$(to&HO^=X+ +zVPNQpvI`eK!9nZYX&x4#AQQg6eza*OAkDtHPnlJ8Q%Jg!z#JQg-)}p6Wk?-;W3UCO +zT&s+0c(v!7y!tvi=jg{-wC!okIZF!~Prb`+kQ8(zndes=7rkOdQcV?__sT5}_o*_S +zj?|x7<_(M>*BL|NI#AXoHh^uN97J-g1a%)HuFACrMln#%4=ImBDHhSe{TQUv111)y&{PQmp(K1ed>%(v +z*Lb53hDmR>SSuqjuz1LuT&qG-@9mvo-%>fGh}&0g_M^MZU(es9OGvv@yL809t@vD) +zFEg@1E9YYJ>1?tFGpP31O~JRru8WgjNSOk;N6Z%fgkicIEY9zD)Rg$4g(YO7S1mIktdFx>@w*RFIjRb1Xl^FSpfTNjX%Shy0p-% +z%DGQi^%UMC?Y^pzAu4C%^9pey2_2Ro_N7U1+$~6NQ#;*s5`Gw^=~}&@8@oC+-5D90 +z-vF=l-yxHG43d~=iUzsq +z1?SjgqpWY(jP-JfWh=fLOf>r|oldD?cfCxxU5*p3N0+V-B`znQXM~$nQnL&fH$jQT +z=FKX%hx0QsIue8{DGe#0+*a7?HP`xPy9rn2wd&$_XF%S(N;7lG7}`wb7V^AV-8M** +zU72mJTh%811go^Tnv+8=+AGRvPI0(K&__&b>&RS%~!y7 +z#P$JMt=W5zY1s+z38#@U6k^VqAVt?!`{R#RVDl}7!wo_X-ZQkwb}Wzo{(yS4fwW}{ +zlNlg7rbz!zfst%S3hsu%=#0AvjmGDVHHt!|>SJo<7N+_mocNhQA$r_GUNTa^ZL0Zn +zc;@NE1V_R5MTvdxhC}}8LDO=V>010N$z-*D9= +zsazl*TRFlA6KxtAzAE)G)(Bwy-ndy_9HKwG;DcGrT(op>#g)il#FfJ#u5J;%p-n7oT$hS5_35?>I!d4YU?TN 
+z<3uq!v*W<)44+Z0^+wyCZM)U6rZtdJw#F4M)>K!y<%gqk$NM&?ZII +zG;%aIbTBbiadNP*HFNyU@K&Zw+RZXz4T1d=Y6|LC&3h|djf5jcR(SX#_(A-Zm_V-iia%oHp6=U9 +zLTi)5Y{%aAQ{u?t?9Q(3{dXG&w+}uyP7dvkJ~v;QlB~36jebtA8Vf=y)}U~d_l+NX +zsNinbLD`^87=pxnCvS}~3ZvFHzx+Tp#!)&A{%^VPil{qwaL{YAY>#FiXH6PHReFDyHWVjuM(>-J%mt~{^-fqT-? +zp|MeLd-B0u4yegRecG5_AX|4~_%PX;fXXw{7P~(kONHley?E9Ks~SIrRIg_6eb!D~ +z0JLy4F&2D(!8SMlN!OssnH|#c`}97&OOMw`p|4%ZS@0Uvq9WefXE(o}K?`5V>;#$i +zA)}bicoig41iIv-@FsXA#wZ-(@cU>!HL&I)k4qa;X*l&03oOf_bRD)mRW`}zH^s=? +zuza`lZ?OzoPHUE6v9ZW-{pzv#T}$N{Jsc+5;YA3}U$3UL(Ab}2{ejEQuzVP< +z1duLI?%21-Fzn9RR*gB}-@Pf0)zZG9uE58aT91bJPvB1$&W}rJ$CB2+SCmEsu{NMS +zF{~v_jm7?mH$949aH!5qNLkaxiG9p2EOdq7$lJ3(494Pyhpb& +z5mvR0W7`4VzeyZ}9t_3idy`94NrfE1Sk|a<&*nGktkbn#b8SWIP2eX^u4!QzSyivM +z55!*p6cfL0^}N<=wBAI1nrF*g&A)FvBS<hSL}sGN_|@_^bz4@-Y!pIKdb;0g=ArZ +zt4X#HlEjyTXV07ndSN!dMjqalh?PHiadUVnO+*#oZF$jUZ&o)MInFb-9DTaJF7@?2 +z_rKjk)qlSJR_}B$>t?u>>-8|5nCt7E;h|}N5;2tP`?521N-XfW6KhAr+0e|z&A2Io +zVYI0`Ftm=0(+BzbW*h4HJw_iKx(uXP-fDg;1399bvws=lVowvIoxK4*?%B*^lgXh^ +z|6T&8X(z@2&WBj06~nLlezcbKeSLlI%+3lEdsHVtVTtONHtJ81Uvnq@^ulFKIC@jk +z7ipv^GPa;)6ugH}m|QAPyoKqyPU6wc0_2Kr`Omq-ICYsRVAaWXg?T7KKGW#{h08!2 +zM{5_{QLXB0eG=>#O)v#0+o5S%w(UXRF$U<9MX*$6H`^q|>B(<#y^Oc+g@ubs)0k@n +z28BNxSKDjWpkL8;GoI9*PK#NSw@`%AXSanhVJ;hmuE7nkl#Ww`%O16Dp($gzFQz;7 +z+@^Ji%(3)CP!jfeM;g$6oumrL6s#_hat|o8`~DSuOeSsl!CvSMT$^UexKX6;Y8GdG +zAxo~-$BsxQu_5!QPp+4`p)jx6>%DRay}2OOmwT%FTceJ2)F$C^mS&yDxy4Q}42zHW +zbLx<9jzIG0laQ25TbZo-!DZSX5lgcYrE8|A^Wod2^ALi(?74R{0I$UzZaFMEh)55Q +zlqY+$YY=S*PTgc9QS{a0ZUA0JP4~`F}VP$MWIgBFEFd>n)Lj4H7^aT@YAa>VL +zxg)vEPzVI_iuG`*i!WPys>_(qJSMSieq4b8LsfTb(%EuJb&loT%RvvQDQtYK3e`rO +zQVU#%?d4oGmX9yqYV{Tjxr@wujbGeB+a3=dphj7}PC=7IS9hn$H|t9m>1fioy8&Ce +z`EC4=>*j+{cyq_pYl%9I05f#2F9fnUwxjVW=bVYB==jGH=D@1_I$HyIu@aBB?f95e +zOf!A0g%Jw-3*M#f?L&hWrS&@My5n=fkRx90Sk;61GL$M#RXMJw>IcpOPUUTtGEhlJ +zk$8r(ri4n)cuM*x!dN+`L3+#Sn$u32(~1YF`C8)ky9kUn*w}8nH{ZlMc0+|$ghEVT +zCG8numo}ugM^z(Cq>FnbvV2U3P<+q8LK5qwvfOI#Zs}}M&L>d>B9C&ak7k@<48Yx_ +z;~7tRcsJOZ9y-UY(DJ^8(1GP3dJ)}9jI8vtKNXeAR6mdH&qhnQR9bRYtDl%Z@cM?_ +zfGy;6+PDQOpS|&=a%}HX4O8%O>q147xOEccM}bW7X`V^_AbpT;GBWG*s{$KZ4L`JX +z&Fu_9TDuH;G$)>6H|8F4i;y)ur5t%;|4o(Z4?YzG`b3o!%ddJnh@)dWpt$!q(26uQ +z8E9c+!u=ux9S+8s{4;B@<9C0$9vF<8FSbX_a3#f%b4Mz=6r>sF^sJ$vx8i5Au?C@w%GPb9^_DMMc;yg +zG4C$TyFKR}1J%O_%KgGD_pV*OFQUF)X|;ic4h~6Sj{VNYS)7yayzY&_aQ1|jvmo7} +zboSl-_pj~qrm)TaydEEXIZKs)DE4NvNhn?wbHvfubmO#;*At-gyF1EjJ+CP*p|mE~ +zYE!fI`jD0Zi-pOpH4u*(D^nf}TjmZ`PkMUD3O~OpX}*E_+?yA9bfOnqv$iobrk#OE +zj)R-tnag0=cm3quvenu}T%PH^K`zMcYLzThzj8E(FmCCo2jL@OgyiR!4 +zYFNnidCyaV;8jD#THb;kmgTN(GuoW==%aYOBI?^@7ezacd!YwC^}+5@mHS~Yh?992 +z8*p0cANEj9JOL&j!6Un1XN~;t;Lpk3+T{1B?~D3)#I`tEC+r1E!70Jhl7lt51%50@ +zb4k#ED$YvVazyT4qG2VlowQl!f`V(eb0?t`K66OYS*Fg|#dRwkEiG}b_@t(@{Jwnd +zJTrx5FTN~9IzI2)pacqf3td?m#?_O^cl!8Qb2@}CMI`Xt@xTtLuIIP5SK;vwP^2pm +zaMKh&@|-(v+NlK4VloNeVM6Fjf8<=KRZU);i}+T+6v}xiyrJ**O2wZ$h;6b4VpH?U +zt#VE0g21Y^H0mI6Kp16m(T}t-srOxI;%4xo^lWT!1peN +z^-b=Kt~)L29yta{7ZObvuRwcPhZ=)uhGzt?vI>9FU@=y!y@NXzw^03h+~Bz99y$xP +zU06Ho1ZCprv?8UXvH=~WbY50$eN&Wdl=7-mJ8o +zAR!1AQlWbQAK#CoFV+@)>_i^0i`LDubn#X$P#gXyY^!bE&wSNmQSFdqHQbIJtb*oT +zhDYm?TcH<~O@N^3lo-y7sKaIw&AZJscc%6L<%dQ2cWY$PhN`Lom=Kqt)N+n26AqdQ +zwmS`!<_-rnm;CmxI)*>Zrc4@~6?a31eL|%ZoxsCeg7t!3WaBZ3gJ(&6z|O+gAY5(Q +zzIPc8W0J#kii_xkB}pWkwf!+hHdTng@eYl>7n>#+qg?+)#FsxgS*3<2UHGb&)}d|m`mrrZ|f!Z2A&tUg&?6s +zF^P@mB)^-ahqpR+%}U?emcO8W{Rzpiw)-BZVQ{$WSx00pU39-_-#O58x+CC +zw3doI9=1qT2!dRdB84VDdJuZa?I +zE!}{yRyBG~`h&+rN9Qe3o9Xp_)o?cLuJhn%W8`4x;S{ssP$h5Gn!w9aERjwrP7!ZH +zRvZK!!Js<5HZ#)l;=TEibetlh`iPXX)THwOIJez5HPxJy 
+zL*lACL^n_A8gZ3QcIYXO0wrts149B9mGY=t<-!~L!o_bSUzrz3XAHxYXE>F22p +z#b8KL-sObfQk5NIja=-jGRvcI%{_%sQP{6@J{sI1pI8vIr+ys9n6ONVU=qEtwK_@d +zI>RB7T9JIZ7r+9_@ygevzsf@h_O+EVUm5Iut9h2#V`{^=OyAjVJkhk0q%RSkXMFz~KQFj<9MJd)6l85PVc&nQtTA+Ny|7AsL&tEl0Y3fA +zvDx6fCv-(ADnTE6nTXj!qA}ciWg$)9ayOwzY~jL6q-MB0pe(OhmuM$4vZm$WlUOv|;u|c5sj1 +z$d;MtAqqC}e9nwcb}-dH#`)`ov)|1A!6iVfaefvSHq*`g1E3=QqYvTCNFFP+rqnzM +z(!P@-0mN5nsCiuxZo(AE%DVDRf|;LrtUyhZM#)`)FV71x5bDY%c;pqlVfQm|-_*I4 +z$k~f7?(nBVFf``1E($icsS`e9XtyE_EgJM-XuLPEb6oB#St0FH8~s`0wxrHe^nkER +zV9JCulN~&7%fy1JO&hYUHy;IhfZP9`u|D~%*=y5PYeG)CrkkPeiraKVQM4*EdvJq5 +z=h)lYp=CpD*l0Qy(%ss|T4h_^&HTPH6Fj=!i=$z*rd{Pi3m%R9_{kOos@bH|u5mJJ +zT!AATtedYOlo^LR>_ni884T=PkP!BZl-!s&i#2m+^njh`3Y_sl2j|(1?Ti#1>^?X$7}+_P{JTQ@zw|nI +z>Y@^q6apEsf;Z$IQHv&p5$SB%yD`{2XZl5>TANEcj6|vBR!445LA9jZ#9r2uE&7u! +z7Ij}2lW*ZylA>jm@X&VrccQ6H;tNu}b{XQ#=n@0eqJywrcjuY7xW|7Gb1&SK;y`Es +z^|-}QAn(2v)3(AMSULmEeSw}J&HilRG+xPb07*M2pb5cAwnMOJkZzE&iSnZ&J}gGE +zN1|wDzZ@1WCO?3nh>a6T`0=0D>oJwP0jh5cc<_>(SoTrhyqBC%%#TSX6LP*FGF6q&Y#O-X`V&0_9zN= +zLIyA4&!LAgyl(jF82%A}`g*({a=`$^1A8d6KLgP4!-xOj4?Jc6ZdnOEmR}jsLQaD_ +z1E=|D?X-k7jf3kfM)LC!+eWah>+S%sce+^^NJ6#FS}*tcx0yg +z-d*pf&J9%C)S(#Ftz*CE9}^3ixSMIUA+=>EmY)zyfzyGHcGV2yqq5J}?^3S#0VQ6S +z#@zJbBO05MrewNoG^hxFEGBt$2q$dyb8=^SWsi*AiIx+E!sVmvR?JkgW=>FGKHh8G +z*H1MT`*M@1wj2-L-C7-p`JaGcb{}i1!ufwj)LAhu#49-8_?XoyIkc9fIYxh{*IGH$ +z8L_BX021h2ptlB4gAD=&TBvi)M=Vw*RTRo^xhQiRt;Ccqe2L>DUMi&ETOD$pIpCNd2 +zAyJXYPr_8RmOAqYPsa8hciXRQ0L^Bbirx(;GOh50zVJZ`?}$+6TSLMqhkS?c85K2N +z8CiQInjqUo?#@og0coi1SANYFaPJWH~(40 +z|MdTG5=T+yuL}NpO5i_&zxAnLru^y5z^{UTt(^W@upL|&`~TNbf7SDAIqeTkr{Lp7 +zzZBPg75=q|@P{xv((l55D=GY{;n&3Q9~z2Ke%J78n)p`*ziws!P{4xoufOnr8{1z+ +ze~p;`5QQc9zfb>L{QRqyzq-~xbN~PpL;%3w9PO{-e>IGM7B43GllZ^Qq@oNAcy$2) +P1n}ht_T?Vrzdijwk-$9) + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree1.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree1.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..79deaaa39345b82371db5bda09d186632598f0f2 +GIT binary patch +literal 9419 +zcmeHN1zTLn(jMI1CBX?cFt~ew-~k0cfD2BZ?%Fn6f`yf761uAqd^XbNdsH!2y6cgP +zGzz$T7ur|lRF^Th`U@w}H^#6g&CO`tbPoZ*iuHE4iPA0h +zG+$Y#^b8@FK><r8}xMEaI*eqW2Bykx}pE@X4(hR&#sO +zU(0iI()oGi_x63|YLh=SB=!L3jlpd~i(NlqUt#PvoVSUf_qCBxdIlb@_U`h!AB=8k +zemFW!*!5<4garVeo}d5F|R +z#23ETOLn!+RuYPd%}Y`1TpEygYwHL{M{SoNZd~Tr)Er4y{IwuU +zx^IO-eB$)=7xWQEH9`ceT;c#+{$yW`ZrOLs2G?Z}QzA--r2!RnoSA#EBPrf9332^HiuDb6n+sGY0OeA970KB! 
+zuUSSUdZ;oly)~-YP6yIlyEqX#i+fM{{6a|zmY*Zk-$CLl5SMuVJR=RyARz?6L%3Kn +z{f-l7YdZ@AYio<2(d*xsfp`vK&szSwTZy8qbQcJ{8R;<)N0W-hS~R66z;=ta^c%UWRmRuJ6}0R0M&a^T=Oc)Q<AkA|ex2$*;wU%bBzw4IpZn4BRZuE)?*=e7 +z*OU|M;_|kGghHNZKA}1Rn32gpvtxPtxU{QU)EFP<{1Q;@=0|7PKx{Zfx2)hv&but~ +z9;hOUvRH&Nowi;*iL;$oXp0X`3obAngCJA0Zjaip(QAq-v39XQB@LOG1bAu(%b}T~ +zxFczaPHC^)$Z)(6DtQeYT=pd|*T31yiw`L?k7}j}u2VXZU6X*s@6dk*-#lN0AtMQ# +z`3{|V>U%-|K-uPhpWwH&Puzq;+a0okq}fS9AOA>Id=V5c@F99NV+>w>z?CX+^MOjk +zZWA|qcUNQbexbeLA;wx9G_O)e`$Io~nqK3Y$ +zLiAyATMoI2lgc{fdk7u6>_I*5O^a0_MaG4jKofC8I^M%Xk16YuK_)6zMSUoOQE<~5 +zZ}Hu!yq{4k@^zMGy4HUhS@%aZbVK|Jy{lq>jh$6+s*6_A5PJ1L%Z#FEOPC{LD{3Wr6nL0Kxx(e7#Vy^Ps +z&H_HBXZAMS80}Cjp+kX@O=EGV6%tZF_Skh||6wuIKkY37>> +zYW2|WEHtz^zx{+*8#%sueE@}*f;bz)P=S=C#88ajMk>aFW`dF0V;grXNz5eV!E#;& +zk}|f94i90rwLD>RSbT?6y?9VlCMYG$X_K>{G@Fa(N>|5QFc96Er$8 +zyPPO$MEmh$i@BN37i+kg3SVqbqPU+iIgI1Ta<~^V_x$iY_H1%p>EQ}>w|(s-Oq7)) +zy>HJhC3H)NUb!(|M>J5yM9bW3)J8$oo~sRV7)xWR{b9__}ySP-yXyk^Yaq<~V_e?kPJ +zn-WN3`I7f>9I|oa>z*md@S&WmgU2p>%NODcLk?&SKh`{AdYGk!jvQpF&4Bf=e3P-i +za?@w@cz8jQ=wFNUu{EzRA?7QldLT_Z*Z_)jDkkQKZVOJ*l9Wxg1lF%bT9mWhr$R&#%_MRv2e*qq(1_O3UV4 +z+Em}rUN5GWjW}(QWrQBDw;FaGFzr1&FX7+4WOl_9nhqQQFhu%uQ}~;gbTBotGGhAe +z%KUSE58i2p5^-XCFImJ*Y&q>USSpZ58)eS|<7cf<8?T9TVY +z5>iJ@f^Lh#8GmZnY!c-}g10>v4V#Djo47CxBLwgb62rZ7?TXhv`FspU#22`;;8_u% +zV2oX@3PT76GKH=0kPYODEPiCPzk~oVf}01nnHi`mm8-!lPI=l4{Vnrq{=JC!?A^l{ +zlefp^NgbL%fS|WKw?szA>tJkiz1KG_pCB~-2zdn<=>64$7`K)&vVkOdL{%GPEfk)S +z;WEfjIoHGG3NYqxgXeYSeDoefk}>T9?Z& +zE$BT7eZ(o$&CEkft8{i@38n$LBrh7=FLi1x*H9j&*i#m=Zfi~mXX(G2!&akF{t2DT^ipL3a<|-bU-5~(&Y*l +zB0f!7XA15HWdp;I&5ZEW$)^M2_|x%iSz>`tET)$zy&Dpdaz{@tc2D`SXo7t8Pud)H +zsz!Z>neWZ}A1*KRy}eI;e{7@aK3=X?IqXfi=&xsZ+>OO%czb?wQ@1?|>B;bZ+Uz+d +z5xn1ww0_A|UB}G>S`)=IT+{y6vy6h<3Hjn`1M1NLvlAXe8d5BCA*+Fj0?EbEw*YCT +zy%x#ZR*wMhWc(J8jo;|3{P9;a4A`GoZ(HJIvvN=BlPI-d3v{@+O +zRLQqQcqs$EGHAhy6aZHamd<#>8dR4$B{(o^VX{#-0+TerP5$38zcI#(Vk=FoHNFvN +zr1*j7@qYbQL?kCaiKSeyTjcBeGF$a>j0^fU&{4(ln3yF+J!K$cT4NA1)|_GB68tyT +z{2|I&3i8HQ>vq;ge#X3fwjYz}1>>oE=c;(V?h2=juY$|Ie~lN(ZXuDz_QA?!d) +zWds3tcEWLpN9+hwKleZ&y%Oc>04Rqs0!hWZp2DX4u|TsBDL*w=^#;P +zn1E2Qa2KLCWA?BLki)rO_CPiz5CV~+csW?|Y;b*Bc@FE5*C?{lhx>zHPwBOaRGMsD +zrF~J`oc|qK0y{sOe3>DaZe4VJDK!8^n_&Sb9D^{)_mN`1fJ+2F= +z3-pdhVH+dPwxU<_L0?wi_#{Mc{mB;Hfw$L%rJceoUQ +z#2w8f;gQWyJlmY;IqK?6^l%ZuKA|7%_J){!Qz~$xw{D#G^!iO_nnK38<02?{LKU2{ +zlisIyum_h7j>*%L;LFc_YR}qd +z=i)!Dd?nbMHmu<&#IP@wc76MOv1!T}uFjXw&BmK6UvWpFBaK}`;Ubqaiq@(Px1OSk +z5JSM#UQXk2NpTjnA-+PBhP}g!tN=Dwgu+q{=@3+saHn5CxvzZG-cJ7cD{)@k71Y;` +z%+P})oyhW~m7W33Pe>HFc*!jpOy-@J51#ew4Xq?aDXuFNLOjkE@xoQ}2a||H=I#pc +zZfW9KbES@m%^G#fL>CSEpSizoyGsxfR~Ij3PFrJ}Z-ML3C#CxD#j6z1=;ECetT}Im +z?{rkV+xnGmdp#hIrkt#vd7ys?$F;nEMupEKTSx!^%D?h02Ukm@-x94s)uE6Lar73r +zGt}&3!iQNqOA0fBNTB+xkRElEg{FC*?5#xg{D&s8I;}HG?yZ*1n0$nko;QxtmG7OL +z*OSqcVkdKt%3F$VizZJ~5?HsR3j(B~GgrGMP%-Km3JNeU9z?%0Mo*YBAP(k`BJf1R +zHcPfX(t$66qwk=|<{{w6D0ea)o3EOv1<@l?h~8mA=t>oGO;;$#&rF7_W-|wJor|pK +zx)3Y*^7yllltZkkKe&`EX`K;TG~|cv#eNe(9htGFC&O80&76m|Pvy0$P{^7nY7egQ +zS`VDi>Jv}6g|*>-m%#QaV_e&n9&MWfle86?Hi%EKDX3Y6>Gdb~5I#jEfw=Bm>;_vq +zS8N{Ps^zHeA@^+zRvPP|CbnU!*#0pEsyB*y43PHYhu7D4KOC7!AlzN+(-2%jVsKS33&wRWan|opGP`+c*tnGsgD1Hhrahw5|_0JkX +zwnuemvdPBv>aATIqvaEhzsjU-NIDd-zYkuD@5m<&D-#@jBaWohy*G?2t8xczG89n(R^WA49c|0jv5vnMAs-3TdM(M7xyeuQZbt6i5_!13 +zva%l*#Caf%tUc?noq7y-vzp4(Zm;}Yz?QhV_sc}Wh~7zV8)VQIG=|s1`1rGM9&j`4 +zyhc$7tg&}EsRU|73$+`!PQ5|QvRDpLAuVvEvE&orodH^RPyf450vVNKW7}O&2(5#s +zv9A8A-eo){BaV8Gi2bQ-y<+aEMh8i{Mt+aCCu_$mUdb+VB(xLC4jpWr*B+3dP$x@{ +zl`t(UuxKsHhz_Y54DD(3Zs2(jbolW~jNeP{_hl=WFFfPsMJP=@yiFc4+dHmFOTA{= 
+zv;y<(mDz*^4>?v!Y8x3G5+99shfOH$)W(eW#m^guoKi~fM_cb)?OxrXak997^lyMU +zckEnU&b!rfKl||+C>$#$vGN$_a}{?-r*&QTcC~SC5N+`Wl4)tn0Jr`fw*SI6kE+m0 +z(++;OwV|OOewmF>glA(KN^H--4oXQn|JKSo!Yr0X*v2PvGj4f+@0CVY!U#tVux`zU=aFZIqB +zx~Vel8-2i7EiO*|*8J5Z9Xjn0b%$T`D_B{jnAITaJIKUrgbtes=#mVRob`NDv9oj= +z7=w{J^SIG$KXjrPtWym*_Hgodi^o#^qtt<(tz0jq!th|d8Skm{+1Qr{l%xFL$|*4i +z^r}{Uf4thBUKEieDr|jn{%Fe=x&{>uqnx!E3GAykHLRzb-Tcs=X9Y6D(>&*}R1RkM; +zqx0sRR#VIQ0?*hIy@zbbJG!)&l*)r!Gd0_jv1NSY1$CngfX7OWn9Mgp;Ijl2A_~$b +zPE1C4m5(;?;dM4W0vxqEnj-3szS-S0glcpXa~YBNp(qfcQD_nd+#s|F18!{7#`}5< +ziKe~!1&Pnm*AdF4rc+OJAI8$T7YSiTyhexX1=B6Enu)(>_iK~ao&ou+;M*{yG>jhv +zN@=zWCW8*$+8MTmtVefY^}{asHYF1Rkx+3F#q!UW(tU`Kvnif(pvSdn4UD>q3PG7< +zv+yo^n^X~9mLwBwF4^m6^$a&X9jE{Aor6nOTgHhI{;i@$UtbAML!W#WZsyb77WJ;r4j86_dIt# +zZFj=45DSrO;MsaDa}2jP8`X4JYZayqW)CYvJ99g6=L^sz1;&eLzsNVuoQ`Pd#G)ve +zaaBpLp}pN-;F~=W@5BcysUBr|<{K{3YMuxaFJ;schFdL&Tg~^J+#q;q^YdM*bV@lk +zuKR?@*z}`-W|9o57Ia~(-M9FQ22Wn^)~d7at9q?1Aa)(D)!i(v_IXA_ppl=9?I>*K +zH03z|*r%e4;Ud%LLqm9nB~os3LA~T~{FHQ@jf!`cw4TxB-|e4Qg>VW@!{TwpAHwvH +zeqL9H&nDy9`hJqm=zq38eH)wq8Q=4i{i~(Mw3{!2&;yP?wtN`lr?=J+QGfrj(yT8l +z3#qZslP+>5yOuI1c3I#h@aXvOSe_Acn3mv;ZO8N79ne~X#c=dGu4_Wvx@LoKWo9)VuB!W5b+S)Xl +zk+QzP23Z%Dmj(;mgj8C9rlaK@ue?ku-r6Tu2qt1#M +zfpYvf+EWKf6s!6I$iud2m~fzmdZPS3gG?VyH3&5uMgn=ZJbXxjlkDvznxOanPR3AV+5Z2hv*L<4LibfVJGs;Hg)<}L&ULYWW&t&gE9~=4iBL2JoheIR^(tlO(*8=yy +z1%LL5&q(=Gx%*eazZU)eEZFp1)A|2PfWPYbwG#A)rsL<6F~8J>eiiTeo;C0u`1@av}V4+S|mzyF2*Y!QDI{WbUeL)3-fKd=6`O!QYR +ze~qhu=l}rLi2;DWMcH4)|LPe3EUriLC-Hx|Nd;+`=hX!O5T8Fj&w-qs{O7m-15Ue# +A!~g&Q + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree2.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree2.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..303e945574b6bd4495a7ea9341b006981e89fdd1 +GIT binary patch +literal 9416 +zcmeHN1y@|j)@|H^H16)!xVsJz+&u&ucL}Z`5G=U669NPa?k>SCKyVB0ZeP#Ldv9hk +z^ZkPNs@J;Jz51TL?yWleoT`0FMF9pD8vqYL1ONaOfY(QvmPSwjKnyGZfCWH=)|Yg! 
+zcLm$K8f$nuf?W(*J?w1B^I)Ora{$ng`~TPei$|a=VOXh~4MY0+mj}sB7WwbhVu;*_ +zLH(GFsv;dd@qMMH`dL<1&skyj7!tWS*1T1iW8b|wPDd=O?d|G<-uE@Dy@&`L=+HDG +z;sy2gAJDYp5yUy`=^p1`6N#}C7#l~Krvp-)>buprq}Igc%LpxTa76+(G8g)_fGd5C +zFG}@F_y-m{SCzDufqX+Br!Y1qu%@l87`%USEt2Q@f9Wk5@bWhlmT6yCxnS +zOJf%OEU`d}pvi9l6`P1!+;R^4G2b3hocx!=Ctc)A_cUu*#62rSW=&bqF=k5mly=Ok +zh5cDl%KW^HL4L&pgAIHg%7>=JKK4Zu2%E6dH_zBtz}=?v4hf9G4hm}D(8JY%T|w`I +z@h$zgN2f`<{v1#60KoGz3_#^CwEUvUMs)%4wU-dqp+IP9>6ii +z7+PEsirO2XxLW5bi@?Mdq^ff-4@tgrazkLGbxx9TDqHITzL~k4xk{Cl_oRQ*8qN5r +zp(IyfaD_@{>Qt%(z3d4rhAy@FI188aNpYiXbalhGeL}lO#|iA^99KB25sI5CRaP +zJZwR~^Tgf3+2*x_gUzqm>))J#f}}7A%71q&Q&UvvVZ&%ec?xB7Pk)00oOcCL9%$~P +zBM#OwEYebN`dzFLF*X_M%qoCj96~&gNBZAfbEB=pU|n`Hm4su$dSTff3&J^!oR7dD +zzCJ7`4U&b!!aUg9|8$6gmga)hC=xfooQ2J|eM3(}5Sm{^DRt2}8Ny{f%I3yl?J`a> +z1o4^S+RcEwVD*FYpxXL+@-znXt>;F`#Vh#iAWyQ1a{?qeAyiMm#E)qTrkhUbE5x&w +z2XjTwQJc>EyBAx;pA<+-xrKc~T**TRWXb#);#{`!Qkyl8*IXs;hZx!y&zOfVj?Fg* +z`)5`lGyQj$S$Cg_!+`++E+J7D9dZZ6W&Z3eWtxT#>1^1(*|pCB9_~>{7-EXF^ctUP +z6(<(#qvK68R&m&B6yr-4{8zXc?B?2B$$+PG-2FSt-WB}5^$tWkNlYwe^^SvKI|41H +z)tF0f){KlZ^S`;(<{C;9&7Z#WUS065_qpG8rJEVi8zMpg2(&udnBHNVj#w|N`BHi> +z2ZE_I6;NN!;O-ka>FuVuO!0iXZb7Wqa6JN2XnyXDNsoHf5@T!WY4L$PoOc@FYZzgq +zY{}w{YM?MPGEPOiNJ>7N2i#^0YEl}WjIg7_G2~sn$_gGe>cr~I_87mJl6&Rlq`{OD +zr)U2aNo<2^7w-uR9qUXV@d{*mB2WNe@?)|^Bh&f#1EedBt}{V}fBHla6;LZvUVzck +zE`>tYaC>y-N=5jzMQ<2-^+>m{@RGY?O2uwS9^Z)NJO9hA56^WuAtGc_Fk0(0M(gWa +zZ!@%qKHh|&MKwZGD4$fahhvJ?3X0?Fnl|i}tXWiZ=5%|vKWVy9UFoujuZ>zfnPP-X +zngq6WXj?KU?CO0k`Isrrje=)v<48(&ve#iC7qU(#Q3U$5jb89FH3~fhK$6M#AsYe3M|GEaE>RUN5)_UeQ?1~I +zf&`li*r{p*kxQP|H#=)>pP%Q55e!Nu1dE4MTV~}~{}SGT@|KTkUim~qnjzkm?Xs1g +zdAlo9mq3))x8QzIqQw=>N(=Q=9Q{et{-lt7_*R2z? +z7NX?#@S$nP8|Jf)nT-W+);ey-OC;h~yLU-bU9Axif2VpT#&L1}(8Y10niT&cmU?tY +z93NK~Guq$;i(>-^rJ!$lx39XPc8)c$`TG@51)J7KVM#*`v#*O6g!!m%IVzVL*lDvw +zXAxJ@(HEGNa}nT>b7Y}G>E3byEzY=k@8e=jk#qB$_m8n_@_T142zFnpDxNpZ;=SD(yr*`Wy;^^N5X2)P +zm#me{m=JKGNp7}SNG8f<+*;H_o(vV80Gs@#W1RbV3#A{Q`b{|(D=IKvvoDfN;_FD9 +zyx77}-6XrL;Iy4NCaMq{1)vZ3Ns|#3*M)-NRFiR{@C%N-8iPn-r3g@{A8}0F!Nc!% +zynRIJb9J@_n&g-R +zWu4Lvj^{Lj?VsXUs~*qY>tH2X@XZgAYP7b$w#5aCdCO`ImTeumY9NcIpF)ep+saox +z#VfYHB%}LUJkJagNEg^0HB(2- +zF&it_J8W8J4YiSeWw6l-1`xm2f3CLU0JQh`kC`>E@)_L-0RR{w|8-aRTbOjU1lxf@ +zzg^jXo#O*t{RkpnY(LP2=!^5C8=j3}OovnMMKfL`UAn?(J^d^CPXq*V%TY*yaUzKl +zxiLvC*z(4AD)J~ST(#(rpryF+;piW+<_EE}&K2o#NlS*f(GQvV`7R$sGWnZsMl(~q +zWK&SO;*yNpRL+DmBEO|jPbK-g@X>SmDF2cW2?QhA-=Z)-xYw=t?Nctq0Y6kBoP~Xp +zU=fXTuG3@=2fm0N={aq9tFb$1X#C+6vp7bG2Y5A@lA2 +zaRTJ;b9qvaE)pW@@69ip-6a)wk-`xKZsCJgqbN==RNDzGBA&Ytca*)gQAVf +zH=2@SEh5QyQq<4s{x#4JETLf^!@UWAM;AW~+ZT!>OqWesMvEH6R`Nyrjyqt;&7ghx +z^Q8@wFJXWTm5!BlM0t(TEE+Po#tSR84q3IEgI?k5Ao +z2Sr0v4UKacNp9Ws(;(DEs!KvA-!h@|9q2KZ*rf1Su9=i<}C)=QHG> +zM$n?%k`Sa0X<#;hmndObIaoRqh-}hY>Xzlftb@x#+Xzk3vu_FB#_VNHl*Cq_T5JAA +zhL!3X&&O>2PC}xfIECZ0XrDxbS%s6%XW#`>2isBQ@r1N3RU>sMYi9F%cC2~R&?UrP +z&f*d3u$KqT>lkX-o^z?Lo!2R?lGB_$P&7o{z7emOzK_ucrHfS*$$18qIBb6h4lAV0 +z-#dxZAvWt2jhIFlE@tr6Ta;{Wj$`A|QQ)X;OF~l2wg*b-7 +z3Dqv89rMBW=tAiuEc0Na`Ba?=SDgu!z2say8K*5| +zHai>~kFBd!>DH}K@dfb^bK-;@Q=H=3)RxE!f=3v +z_P1ZG5tEsyxNL5*e&Bcad%!nRP3z<3sW2EbDBfi7`2(Ibpa84tQ{MQdxy8;j3mTkp-@)S2+58>NO@U +zqn;=3Y9)(`ZiNxp;l(J>(E%|bd%RAD$a1bFe3C!m1C>C8 +z%A33tv#c)eu!^q}%1mVV%JXTRx%;d_DWx!1iONeBWHUo9@K)OvMZJuB)2{Km`+}RO +zpUxzws~HRJAC*{~D{NRB(J*H`<%Kq;eLui-vaw`;CpwThs^=!gyf2@5bGQAyWyTz#K2XTZ(Vwqa?T1QNCbz80MImntgIx!1 +zBUKF{P{h+kN$+V%?HgKCVx>Mkcb6YU34Eahm8~|)AzN9}y-DNrzQ$2!CuM#EaZ&vh +zOhZ>r#KDnK^yj6O{$c$z6e?W2)V6Gpb@%0?Z{vDXJ6UPE=L(gWfV)kic+JAWH1deG +zw+f2FrjxK!yLdcYan0gCllhF|99s7&3#a 
+z9r%pgHPT#|Dr~QQR@gkoh>Tw@L5?=DkU{~2ddv}ayP)}WH`+b>>EV?J+;#$qpgmn( +ze`$!VdzyJ*MsC{~WgxwZkHOWthaz)V+ZsQSOoh#9H>Ckmy@M|gPx3yWspjA* +zm>tKKOHenE4@@0AC1+0z7Kh@bknutn7M`9*n$L4v)cJvG;M^)b3EjBKvr4jRk)!rkw4Qsw^QL%u1g5KXHe +ziZ+g55e5=xyf?C$jP+98;V;JMK=v}LfGf3$`W#Elyw3=SRgHj$ofxuUxXg1Mm>U1# +zj%Peq&W&;2J#0poNpg!rCq~m@Artnyo2SQ}760^LuzSu+^$DCJf<8K=@l;;Znnv?Y +z>l)NAb{^Hx!pZT_WGQWekC77|eu~ass~842xH-&+tt|eQ{PevQO?XbpqQU~frKJ+@T-UNVcgJD7(~GER4c7an +zXJo`p=<^~J>WyO|16^b|g|=i!k-;yRG3b12FH#-*9))fpZ2h|eu|%$7zz5-S7ZLz~ +z|2vm|RUlj}!C+Sx&~M#u>+1}yMf-I&3}0ibXNogHYP9;GfN;zoI96ZgEU<_x(|U9l +z#XikE-G7K*UtLWsl`l^q9_8O4Q0^l&Za4ero+H7LrHo;ZFp%&0m6v%%X*~LPcoyfZ +zfbuq^K&Tk&LY}56%kPOx#QBujJ%JIO*qL8kurNIKc-TXhlX9^ohyNkYF%}5-`St#= +zgi1AowC-N0`1mzvm$L?cWRJIyM_gr!jL@`C_ +zT+s_T+YHuo|76soyCBk(<&Fi$hi`F?V;=9m=wfS;84R=iOi=*QueU@?|O +zx$nSLSe1tXOQn3hlXh;Q*n&V0P|(G!r3u3k9OqY@Lh|Z?`Ej}e#(sQ3;e)mDaEtAr +zdv$@gRvuR1lJ)eO=*!!jlS5leiK~xyL(%RDcJ9dIZMrT}*r^}jM7=6!Dy4KW9@kcZ +zo7^P*gqcdn*3am6q@{45i@Z@o1(4&Jxsdu +zevZL>f*=CeQvC?TVDd_xlsG|#Ojv+EYEOh8a;YFrXlk0J?Z$AlW#3)b>7ZAqvbjOw8KZ+AK*jcLfmHEv?keK90!ZJLZ=Yt;{y*GX(K_@i3;1XrNwN +zBU)(K4xfTrQ;uDPi%q@}UK|WCM88cFA;A-DKxDw}eo*$PxR}qI@?}{~iaF;WTd`Ny +z(}`g*1Xh6?FVxb+QgTpz$xHKdeY9HWRV@d(wZkon>q)*R(i0i +zU64Vz^mn%9UeK9KL$R4UE=&r;9t7;8hatdGmQB;TFoDMyddA&c#u3j$_U8IZudLx5q^43VJlMY{h +zmQj*ATon*Lm!}~eVx$$}7y`YYf>~CIWA+1pJ90i8kzg}q1EX9t`^o@`=Y$%}Fh{T; +zP|VPmjfD(=@#>=7bN`P(hiGQ4ds+f66{9dWQ!L)9*m9OEtjyO_rN@}DaV(lF3q0YzWs5GR| +z+PxQQ>*Hzy(W?rcTd(G3Ti(0G?v8ffDn6jGF`ernc>eQj$T$%t6yqDU4WMk&Q<)`FKseejc! +z;4yie*DyU+--lE*XIH^&?xXqu9F@=D4AC%9p=48C +z+>qj8Uq3KP?$ZZc5<+Blg)1h|iH2D=g$cUWd$c?_}tJ^$dwS@ +z9a95|vf1_@9|ABGGz&!R{pT|y|DMKw_y2H$L`C7R0DrA+|J(4_ +zJ{iK5Kh?T_HvDVF@6U!Ukb=(tU;q0V=jSreA4tcLV=+G!g?={vSpomUnEl0X#(&ep +ze@6IOUi||B7wtEMpT*Xn0e;>P{sB;k^ZO(GXM6aw>CfxWKTMqn{`2a8TaEsV^4Glj +x2L=GJLJR=>EzACF{@2j>XLEf>9{-Q|zXGL-0vu#^0RUvkCjgSjIVgYq^?$~2N)P}5 + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree3.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree3.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..6ab17be43c9f59a4a8215ff752c6fb69f2c138f9 +GIT binary patch +literal 9328 +zcmeHN1y>x|wr*U5I|PSdjXMNFCqP2u1b25Dx8NS!gS!O?5Hz@ZfZ*CV0fG}O=zw^NrXaGisnHG9*06;7v0Duobh1Y%# +zv2!-HbJl3DgzGet+7DPA}6 +z6I$p4wX>f2(*#V+0TitxYKcB@+l_hmh!f_^4j*?>LvI+?@F{v$$W5BEVq#6?NoZ}^ +zRtx*HUML6&umt!N4-9@1>`*u`rtq>W8b{egl)QQ%xWL_Q`rZM;8SJ2;_YOT+9oQ9l +zv;SpFJMi!%dDoZw9vJ|5ct8Ls{)Luxs+@FZFkO>{u?_=9OMORE8z&C-pXdM4@xPdZ +ze|q)ucsYe`PRx*_m$#tK7_oxc5p#qVRTHEawuQx!F8QJpT0&5oe5DoeT}XkRpWWX{x_gpPc5h(NzW9G)U#(Lr7JFK+d;>v2@?r +zFY(<)2Bq0N! 
+z!nxaU{LT|Mh@-U;1Y-Tud;Oa;a4;7Jlk(p^%9Z87J)AhL826!^Zt1Rsxbw~&wEL=i +z*rmZf=4*|6e4b*AOW?K)9&lfL{vjaS+#=n!Iy%K%o0T|zzqG7%2l)OMa +zZMij*^BA@6%)iFmq9_AXmhwII3UQ_m9gwE-WsY~+%1dolJz8@H-3&3eFP^dupB#%+4Bjw=|%)sX#35T$TS*bhX-hF&NfStx4rK;KmZ2>`acO4&;JtCEpOmit@6^qnewuJ_E$qGU1Y-OZnD%dl0<4;8Dy{#VuT9{G!NQ +zs8UXG0C>dWh5({mH8f0%?(V5XH2lo+M6NrtWg-gU@%`DJKHsddei9i0Wy?E#5mR~s +zq8$vc;N*x5$bMQg_l_)|#AZIy_F!CjOx|XZ9{HUlx;fhHhu+{NXA$o1U?O?)b!zx} +z2xLoQfaTW610Rk+Lg$vZL$lxK#rk~4 +z2q(Guccl#ahhPtV +zZg*Xg<5e7l9`=y(bUhF@==HPcUT6Ni4U$RI#8pA^P{D|N9R1!1`_#zfh%M2R)Dr>8 +zd-2iXHpF0orzWtO +zXYaEvmw_Q23#|oBxW<)*w?QuTwNP4>s!!+JI2_kWiy@)yWv4OkNS@Zm!6q2BxH3(( +zf?7F<1(l_s8J(skWzy7w9_n|hlq_iM9|ChHmyctOG4aw_qreC~?%`Y;C^!y0rAD#I +zn(D&q7THaGvgQ8jasWT20h!<~#E4I^iB~LfI}`E1^p_t~J7kHEDdn-BaG%GxC$p|IeO-jO_p$EDbYy({f#O%8@(*#d%vkJa=k^mPys>H< +zb`2+viOVg7gIFKGe;1fR1o6h2HTmAUoKAW9Kr#*r->FCKP0sX7e0OL#cet8#9h3cP +zhyj{-pBxCj%OAd5`*?rs?(0sA!Qrp;_M+3|tt2p!NMmfHw5W4)-CU@P+c9cBh+hd; +zIfc5PJ}tlXfvt@i#Ay)&1UomVebd!4*L!FgtuSd+ujbhk2Xo&DwL<>U{^eCYVC$g( +z03*~t)4kv9pRBQ0B7mm(q%cNH +z`+}*Al=Rhd6qeuy +zMSs$Y*qd{)U;>hwySW?Z@bx-BZon1`5%+x~B%R&$;w^!d!HXYlX>i&BWFlg0tU(%K +z>|4vYc|fWn@|q2rHahPZTAH+oABP(wTw7DnYr9y!P2}q*3B!nep@dJLWK)(i +zJ__J0sZ+n^^BZ!}XiYBnd3kS;uecFKBnGK>#M5C{WF_8vcz?3>nqRqP>gTMK99ngUSZ9CDXdaE +zU{`MrkR^dSbfp_|mZ)=Y387aTCh1y*qk%VzbRx!W`%nOi&%)_ni8iLJp^-^&yWujR +zWVkn*kV&kMD0U%YzGEqrc#r-t4apJT4a);YVp^IKYtYVwBtFd|ao|n>K5$!{KN{GO +zj*&lnaCdwtPQVryZG6z>Z%{KGJjgMz8oE0_FZT64@xR>0*1tbrt#SS~>2A25?R7Jr +zknQW8=BeRu7~Y@l`>@%6L?wQ^83UmdtZfhy=3IM@XS}A{+rRvXs2l$A#RkH?5neYc +zE*M@iXEC>lgAT*p#lHk&wzD1s;$T2Zd^~YmZ@TBxvmGyJ)`r(hWFNz|V7RpF$7;pi +z-QDfM?V>Qc&3K3mlCEfMX8H)fls)FB_ZDm_(3PAz%Pd2mwvGr^@b1UraxFjf2C=mt +z#^IU=$Q4|Qod|~s>T=VgsL^hLMCe04vFRX#N`Nc-&{N^aCN*fcG(TQFQXbYuXo{9y +zOVC%mUiPHt1S*qj%`c_c=`M-AOxCYKpn~EQ?n?1K&?l1$2aQVHGu95y!>XfkNgKLG +z`cU@F<}fb&dE-zhYA;Xm2>n~x{pNKXWde`cROimil-B1{JUws>;jJ{lV=2c=0n1iOn!SDHv98QY47=VVj|-Ch;YE%n4hiJ; +zDomUkryH}d5GJ?Osdd84X8ezzgHeYx~dICQ$Q<+ra+Kd*1Up2xoz +zF^y^V6ABFI|8S)$lPQ;2?Nr(^A9RDA%=eU6p~6`3)r` +zGnp)r-B4WeiD1GJF6n`^1@ed7YC8jY$s$jd7E=5#Toc_*`QZw?Gv37>E&Y9##Wgx= +zx+7D;@Pl401QmU`UmPBue^NAc8EjAieoi +zjd5p8o;c;2VN +zDH?I`P{%l{%r`lBSh-l1K9w#2(nUJgL~%}V1`w~YiHs!QX!bRwhE8!SG#WLMJMw&s +zn#Hx2q%HpB&&a6qrANf>N1YYwJ61Yg>mT?#B;Eb)$c=PU+QfM(pS($5W!YW5GfXBW +zt`2>XD6EqpKZMK`m+~gD2dEG9O?t#TPLXHJs_uugtg(^yj1`jRfa5GO;LhDiXBh$| +zQOc61@?TS_u=gqJ)u$|{pQq?*rHYDaMG)Q=z$wts;9y0Ge(M(z==g1f%Rjw}FmCII +z+c$$Di`ka&38BPjI^hUK*Sr*ytS-K{6`#iySgA-9=F>WJ_t-^KN)axS6rko*(?d?k +zmS4?_dRe|rIVbGyiEKV9JAFP?%~EK0Cl7S2ux4+>!khMx5#5;bjz;L@1ZIEcmeXvJ +z?+&l2QCh6!VME1KnBu>(b&(Q$dQ$yFd?0gF%jFr{o=oP|_1C4AX)}}te^F0+U%_JK +z9mTFpK550XLV;Lj+YX{ex*9TEF%Ktst$V2Q99C0Ol{OP!mk&(|av_M$MjhjTvpo66 +zuyJbd^wIwIoKwrmco?(;kE#`UIls?v0i +z6}o4_Zq`W>H4FPw=p$Bd6j41hrE=##xS+RcH7t{#H5uj$ecFB_O-4~$2F;m)5Lj*5 +zHQ-Fi4BblAC}OiDxhg^gt|e~t)cQJxRIUfS;0~u1#c1jMFI~XZCa!MlsW>SKO)^kbeN}H&=t+7q&vsl%h)i70YM(R~ZX;Vs +z3EtV=^RMi2lNM~~p9`qbgyWD~U$x(}*qyzNyFs8?fWyyw^+jUO^T0#Shnhk_pqdf> +zK2h1BT;g@o>{R$_9#^Q~chHKyJB5nBFot!OzHyN8`Nl_Ar$OuT-7M}>g&878_ckAG +zwD-qZa8vv#8%vgf!RVQ4kL1fQ@WnH}%J5fD2^h>Kn7(p4WnM~ZIJFnkf*RDX0WZAM +z#KG0oO(oUC)#Q%MXJ!$Xg~K_xGUQUIX4=RIqxRlCQ*o?E{uyRC*6@z!x%kaIIWMtc +zv;@>p>N4Dizm&54ju()|?V9>bv4D$H4xfP=C&TB?zLdrrJ}oa|{*b3Xdrp_x3%zUS +z8X?N0meaKS&}RH~oq}SrEyS(a#_aR{4b^Lc0tU$dqI&ja#46^nU{i6N4ZTu$GM}75 +zihZ|IUbBKJ1J&_w25nk1eW6oiX|z!o;jGM@JbaY$vI<=}eyOq4-hNq>?ygexwT9%_ +zt%26{+Vn&Ne9dr2fXlQ4U4sY8SyL%JxLEbf#w@aDwHchQ5T=vo9%kBr9t*HEg-74H +zHZ_}$pqU;f*ubiNKV=%t5)-I{CXe`;Dvx($L~vMw9y(wNUfrD1-}YV?^o%nu24?%O +zg8hSWjHyW2`a9r1$#~viRamakT9I<5qiB|(>-9O9&`pz2)a-^|R_uRat 
+zsS4{E@pvm%QnhI{NT>>?kH}K?j$NX|8Fz}IU(=^u-vu8|tgu0UM7}s=S+Idx_%zJh +z)g)0}yNN3-n>?&Znq|i-HW;+ok+dwnrn14l_nBg_pmjbzC`7C(o>O1zYL9|n88P!& +z9VK?(Whbr~7|EkvV*i@1n<$CDKaQbq*G7y8CI8|VGn3}nRYGFL_u8T~nAZR51g>ev+w@zQouRg9m(Pjgt#A1N +z_v0xXilb_djMIFPZNao&_TzrUGC$45S6Uw~R@^WjB@RDVWsY0j%;4D|eUtHH#0gK^ +zoRN_DeT@Bz1QGbX;PKf`ezrJ&D_V6TTYQ8AB=$ax%hvySsTYA| +z=7^2H=$)5r^{D5a_ciCrPm{omWUG042@)k;C!$87P?hJ54zltw0~7U& +zX*C+pt^1x*eeg}E>y=a!bxQ#kB9$^f+VJ$b +z^p%dJOp)&K8*7={EMV?WG3yI6eUfk7%fbAXXp7SOLFG_{DGGS@MZ)s+#mB%2rh|$= +z<>?upMSa0L-8-aD`!=c-X%l;|m*I6-%2;L+yXCj=R+TkXD~c#wSmD@{RJE;Sc%&E1 +zJ(28_Z0j1Wl%0lMU&2)sOjocL^d_ttX~+B24DLD8mi^KB+z)=t0!k +zTOkzePZa(--4G2$wcW&iuh58jV%8T$DkY9?5!Yk$eYFu0h@=>MIH9+EIJ<7Zk$|O@ +zSSw=lny3>xX3!_(ZQ;cf)Yg{buYI46djF4}(9uHzECs75Si@?$xUgET3B*{@5n}Je +zVGMCJ{h5}++8+Na;lk`mWW16>H#I@zDaOg})#6^q%bV?I#gQ4+&e8`Wnd{N> +z0hjBmVsnn1{Brnw3G5+b3df75yM<+B$)!+2l*Hly6+6Gih0S#6J)`>y>yeUVxU@`j +zZ@6Z|8OBO7^^R-70 +zDUkdN;?uyPONNYIO*Glk*zr@zk4h$^>YzorX@osx +zbja0jS1#K@kg87MuD5@W!>6S2GF3JTSU@@)oix->6t?&&sjalU6AU@ja;8`Kek;2k +z{Uu2wD=09RnM~&tw<&-zH+~GkCHPB!13Y_5k&zSr73K2-`o51gOi)q`0|*V+>8o&>l?MPP$c$f;x~bDfx*UI +zQH98p#a2jh_i&=w@YtAihhZvOb8R`~hr`>qTPzXu|4 +z@IY8S_MgA)`1dCMd;W(nJru!z74X+C^uK{W=XWrs{HZ7XEAX$KxjzG2V2zjmzfbq8 +zoL}2De@Hrlg;&2cZ+->;TBQC1%#ZmS_-`faUnTrnE&W5n5iDDS{rs=B(_aPrn&15) +zz=!bnzwn=I?^o!rapWIRCDQ-A`ro3;U#0xjt^Oec00^T10RHA=e}(_mF#Z{iMD-{5 +Zzs#f}7zwt!0027d@PqmB1)85%{|6GoMw$Qs + +literal 0 +HcmV?d00001 + +diff --git a/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree4.xlsx b/aops-utils/aops_utils/tests/test_excel2dict/data/data_for_diag/wrong_tree4.xlsx +new file mode 100644 +index 0000000000000000000000000000000000000000..57e60f1eb51ac958511820cef09c8eac13aa464c +GIT binary patch +literal 9281 +zcmeHN1y>yD(jMI1-3jgv!QF#};O>FI;4Z--xWnKQJh*$%03o;xE(soj>o?iG_wH_X +zzh7|gJLmL!=5$xhbXPqkPqn%d3@k1H9)Jh{0H^?F$2m4ePyj$YEC7H5K!nzpa&mOF +zbaXY=@_J(lG-UI9?Lb)o3(b%RfQDTEzx7}I1S*rqRQowFWv=A!rM6iWSL(zOd5?mJ +zu$f+n_6#HqmYeH?Y;B)Fk$0Gq`FM8xAFwA^e7L@k+tfL}ZVHYVY}Y`I4jS&!HX`9? +z|2T9=*G)i_=%V-PBoCKFoRi4dIMyl)knY^vufZd|A)#1FY=eg{8n~6SG^B&IHrR?< +zu2&{FyxhC4sVJ4k{=lhb}co=6)6B*XwjC{G9W +z-*MvM>V`(emHDDm7G;1~@P~Q69rNJhI&Juohj}sSmXeFc3!? +z7?IV^$f(V)yVCw4(74vnk%q8Mk$Gw$` +z*Mxm<;VtS8X@wGbIq!4dFjva(VL6Hb#zf#wK}Ngw$%d=s-6&)C@)^t6`H9u`$k6N> +z#L|Cv86!zLIszB~;0h9TF(6k!T;|WtQmJj|l*NJTm)r0Z=;;xQgek5{FM$#+NF;Z=%$j7}bKJSF6CIG5+upcgNN*l1U~DVYNhrp5&}YAYCd5IAxh*1QW_;HiT4c|PUqdBj|8 +zS`V)l_?*nF?i;|2#?JH|OxXbsOXpIf#*_-0BZd4|Qd|+N`$&XXv18a0Ykl5e#`imO +z1^^z>1pi&J?;KR!q3fq2@a$5^wVTa-yQiNNWdcT+gv@2)Oa|_(d7!@#HzZ1)K4POAK4g>|mX<8x7ISoDw`!)3K7Tq2-EWs+x2wfw|Bg*CIq71= +zT)M5lG`~yyR-~`Lk3sSNb2Y4~MdLC00N +z!wO(r_`wlHb%XJuX^|c%B_;TfK1adA>9Lf;;@!}WrBzsFtJV9QO3E-MHSo`dFtF@? 
+z%F!hfs)vBG%$UIwS%6 +z?g*j(<%>+@rd;XO8?Zi`J6 +zAID^(r%6aaJ~wHdxc%20cZL|X%XPXpnaiuP_=8N1K4le&s2L<;<_%Wrh)pE;g9AUp +znKlr9sAk>Jo##nFoy)IVYO*86zf_jQ9f*wEG}$b)jK7l+U_CR#T3T69qJE%|+fhgU +z=6R=^n&$_sVrUN%KA>4+D}%?&A%zA}_9gI=cV|99nu5P$h~@kk41|PI5eSZOf5#Be +z#?-~qLetg7*3laHGwK#4wkyI&;D-71dJvZICrP7GXEjD321}l5jMWN`pdU|Ea$AQ$ +zMe`I;2jdq+gih(;qQFfJO}(u;Lsr$2Tu`A&{qe9TwC92E-XF*#L6UEgf$!p_DDGWO +zZ&9es&!1Yd%fLBo8H=DOnbFkq8P3C{j}ty}Jf%e0Z@>n`xpjl-lB9_xUon${F1S?c +z$bnzh02 +zF#7^#mC;p2)hK+_oWMUGw)fk7fO7$2htC`CPUO}K-yME{RzVzAYer>z7Y8DbvmV@9 +z{N9p0Xm6rb`JBTxzdZB{wEnAVVq#@_{{)&!n)!59LPRA=PJc0G}k?k$&&(wCTO+@*f@ +zJUeC~opw4k04Tt~Ee$2aEvXGQrxOZN?A{ +z6ASPWl*{dtj>NSyk-q85gwhWt78Ye;eybb7wzG;=z)Dd$Rn%mSa5cHvH|gq%qjRaJ=Wwa>*;{(SkY38eiV>29CNJbg$N2*faYP2g=gu +z+S)}|)f;_-Cz=BkQ;@a!To^SvY@j{N@@6gP-!`5SC+}m-orXvdbl7b0GJFiTzpkA= +zJa>2?PTsV)zxI9)M%hpGM~h-mK%f6yT&r`$_Od-#nGo#Umu1RP_L6Hy5V_7Y#n3Ja +zNol*pAbRTcAOb*LJc?F9q&0m5iCBWm1B(tJ+q2z*SYmTry$=>Ojj34TJ@V76Q=Ztj +zhyvCaR9i~|UFx~8PL05Yi#*Y{4^{)HVo~E0*zIRhexdDEe-n!1mQA4=_PuoK$6k_+=aZcm{ +z4b6f=92-*D<{O3|hgQ+>`=QA$w_qO4u=^3Ql%Qqumh;=#X;3`fg33_ldz(<4oK1)b +zPQTnYSswTg>?I0Vbzy(Re-p>KWV-S#klBu{zrWv$%S~-!kNy~5Qm(qSo#6xYO73K! +zQKXV3e_v|GJfk9Q<|eF?n%@vQr+ekGpCn87aRQcguu9RD=y#zA0YffY1Rd%fNnzTs +z78V0|$uicpL-3hUOq&k4UycvE39bNrD?DA#vE%J7_D8l9DO}CzjdpoiHkvB}UyIFK +zNy(zpbgmk)LCF@2YG>UVtaIibj^o;sDH#WvR@!j3oc0J#oJI3+Fycq<(s9~I<-_(( +zObuMG`3%?ItMpE(8SVinI+A|BXcOj@N&4U{@#+!FL7ha=;w_XUyo2}0{E_ErzH#T*)FPa@$jOuLN> +z$sA*w79Wa&>d-C^Syiznq3Jj`vv`a{%k)=W+1BXua|A;0x?7TU=Kkjc7S +z@!NKiQym;?O!no}pg4X!@wy~D99w2@W0!#Mu0_SX1KwFhl<`V{IHX|V!lE4|29N{T +z3MRB!`8-Bd4pp+kp^#}RRwEV8K5g!4E#f>1TgJ5q3Wfv^Rb6W<=BOms0n2+9-`-)Q +z@;>KLt2P%N};yMOZ2uD4~+o#)zX{N%;D<$dSP+Nf;QC2pDI;pIB{Y<1x* +z15@#8CwS8^zg-l1)n*WeaOQ{soMg}lu*UNJM68VOIGT|9T_EWMi|9zs2EHo4&e23w +zro@}6g9zs{=a>Gr!YH+GbAF{>9Yce*rS%3nhT}7#&?CMbxYdLCN_3j8RUnTOoqabk +z*YfsCC79&HXhKtEE0Py(gfEQIC2>JkZ;ci+bf;W(r_}e;^7Uk$caS+= +z?u1J$NrYLEChwW!l{RE_#8e|sWXO6aalg-iQa5Agri^pdTx@gpvU9U7e=b+VN)zK+ +zAItHDBbZ==MR+{*?$uyhM)(YuTB}(ri3|6B>^zpe40UNs5Iw!-=K*2In?^gtG-ete +z`x~6Eg#AOF@U1j6`UC}mY{S5WFF>jkK^vOYZ~QAh8k1MjE|T_jQpCn} +z!U*m0V;1S^vNI#RiwukoaoHc|49cp-OW3*b*f$xqneT}DBAEP+Mkrd{y&&BJ)W;iH +z{b@>#nSxMlF|#-SfK4R59Og1b4Qx#@I|_ui-L)?H$h1G>n)K~JcpI(aOlqc%so3#B +zmDQ!%o~;!fd)7-)WNXIn9ZW9=Ywjm5l~*0A{ZaMxFP0m)SrAdxX85jOyU7YX|6cb@ +zY&d5^&rO`=Kr!d~c6X&?)(W9HNW}Y1fIz9nSM|OeUODyiV*Yr>*FE^HH1)(-qFz8% +zy+^Rd0(x6Ytv&;9pFdR@e6b{r!%LJSj>^fNa>cSS%lIBa8mcG2`!(*ek +z8t~fCn0_V-4L(6eS1!9<|HXq}>tUh@uHH6Cfhnnfm1bwRva%KmTtkM2eGr4*>SpHTlebO0^+LFcWLO4S&I3a#m +zaB-lqC5mI!T@W{+kGI#i8&SEHYgh{DplUWaqZQog+DwFY_t?z9OiyZ4 +z1Zx{>`)Px}XQguQC6t9JCgiOT%AsSmvXqr!pFc?LvL#I0upoabqC^r(fbUf3eq?ey +zk4(6Op<05%DNy(|;an-9l`=mQwO+s(F7QKg&DfJvGe`)< +z9%O9(*8F1YgFA4&6sFU?VEJz%?*an5NA@yqC_z> +za&YsKt17GYFgio*oAhb%hk4tfP5@V#M}{U(f;tkmx?oJHfrM#^40RuKla-?2^7y6Ve1_D+_s7fYw-G}M3NVce~21u%~pC}4= +z#>WN5Bxu3Iwo2>UGsb&_$^BWsM{qT4JNQ# +zJ9(1gT4wfkD_ +z4HO|eKeebO6xxamahiKlUq2Geu3b!_d-b(Y5cGOz7`@~E@qYKPavZ<(dIMXsyr~54nc!F6h>Ty1%RyHPS?} +zO5cZ{xWOvqyIr!-4(SFE>T(-B$L$cntq26~LX)(-lB{>?J%ab`~m(h7f2IQ{7YY!lr2E_w)B +zF+nYujU6?cMB96Vufv90)@`t0oN%6KjL%%dx^gL|LdYC!rF;;x%m*XvHfo3fH_w=b +zKANZ6dXmTE_*n{}*!0jGp?Kr4DMaIyeYcp=v=P}$vh_0)W_G&-N9%309!m?$7pyKb +zM6H09n9A%5nRo3pP}MFv{6&bYyE0R2NJ-(obqvu_=614xbLU8%e$+ib^(h$k==I3i +z>^>VLney?4;M6I0q7Je5x)J9`IebAxg0-v(2p}eAE2<*rTO5lKv;A#*);iKqm6Nw~ +z)UCcUr^wvte5w=^l(>?{qMyDpuU)K*{UbPLb&h6aAsm4{1Rz)tYSzNZT;0X#4Upa3 +z$;I+tL&g6wvXDm?llVfdpAt88P30Z~ZL%rZkqyPRU3wg3V73|4#t-dbXr2$74u0%x +z0Nc5VUL)?U!~q@U2@qbv<(fFWsz6O(kcDC}iq^m*@_xnMFNj=eu2@^H6Q56`+b~>H 
+zqfyORgC>%p7vfB}(36y0Y+!A(mt8`r-vu+`uuV1^y}Ts9dq0J%%l+7k+@nA`SqnP^ +z3q)=1QOA&Wl|&Y$yjayD6Mut>C$BzXhK+T9Jmk>pT)7+2bgfQO#aP{vLzZXXB0mI2|V`S7p>6X^O#SA1n25 +zLY&!6AmuOE>$9-ahDpo2CJMj=P}Fht9Ce(=%` +zPbxZrr6h*cmq}V>hOJ1sWDx#KjOC;vo*B%5?aHXS_T0K9hXVyTxuy|dg`B^I@% +zkbFWi0_g^G9hWW>mvN|(&+r&<=dRHhg@5$FYQ +zYN!j%u_Ku6d@p7&!~*UgFODX!eCS?L*h>k#&KbO{0%4$$lJZ}|(;%P>y(LjXpDwk7 +zOE3a})PfMgusjA{%}4M|WS^n2?C412<&7L3$gvEW%dqhK8l| +z7}(#00q6EHI;NWc)!cfsuIQx*!7XzOnVR9`q}5GUcDI{`=GnKh3uB0ZelIwnpjjat +z>p#D&@ozK!d;f=5H`JB>s^G6<<$nwQ+|wXf`O}d3SHZuI+Wsuq0hy%y|AV(*_53=2 +z`9sqQq>TDy8uP30uf+2o!rZ983IC0T{#C=Ttmq#a79d?2 + +diff --git a/aops-web/src/appCore/layouts/UserLayout.vue b/aops-web/src/appCore/layouts/UserLayout.vue +index 4c563bf..36826de 100644 +--- a/aops-web/src/appCore/layouts/UserLayout.vue ++++ b/aops-web/src/appCore/layouts/UserLayout.vue +@@ -11,8 +11,7 @@ +
+ +
+@@ -69,17 +68,18 @@ export default { + height: 100%; + background: #EDEFF3; + img { ++ width: 500px; + display: block; + position: relative; + margin: auto; + top: 50vh; +- margin-top: -180px; ++ margin-top: -150px; + } + } + .right-side { + height: 100%; + background: #fff; +- padding-top:25vh; ++ padding-top: 18vh; + } + + .container { +@@ -113,9 +113,6 @@ export default { + text-align: center; + + .header { +- height: 44px; +- line-height: 44px; +- + .badge { + position: absolute; + display: inline-block; +@@ -127,7 +124,7 @@ export default { + } + + .logo { +- height: 44px; ++ height: 80px; + vertical-align: top; + margin-right: 16px; + border-style: none; +diff --git a/aops-web/src/appCore/locales/lang/zh-CN/menu.js b/aops-web/src/appCore/locales/lang/zh-CN/menu.js +index 1a77f55..23ffb6e 100644 +--- a/aops-web/src/appCore/locales/lang/zh-CN/menu.js ++++ b/aops-web/src/appCore/locales/lang/zh-CN/menu.js +@@ -42,14 +42,14 @@ export default { + 'menu.assests.create-host': '添加主机', + 'menu.assests.edit-host': '编辑主机', + 'menu.assests.host-group-management': '主机组管理', +- 'menu.diagnosis': '智能诊断', ++ 'menu.diagnosis': '智能定位', + 'menu.diagnosis.abnormal-check': '异常检测', + 'menu.diagnosis.abnormal-check.rule-management': '异常检测规则管理', + 'menu.diagnosis.fault-diagnosis': '故障诊断', + 'menu.diagnosis.fault-trees': '故障树', + 'menu.diagnosis.diag-report': '诊断报告', + 'menu.diagnosis.network-topo-diagram': '拓扑图', +- 'menu.configuration': '配置管理', ++ 'menu.configuration': '配置溯源', + 'menu.configuration.transcation-domain-management': '业务域管理', + 'menu.configuration.transcation-domain-management.query_host_list': '业务域主机列表', + 'menu.configuration.transcation-domain-configurations': '业务域配置管理', +diff --git a/aops-web/src/appCore/locales/lang/zh-CN/user.js b/aops-web/src/appCore/locales/lang/zh-CN/user.js +index cc4369e..98c9b04 100644 +--- a/aops-web/src/appCore/locales/lang/zh-CN/user.js ++++ b/aops-web/src/appCore/locales/lang/zh-CN/user.js +@@ -1,11 +1,11 @@ + export default { + 'user.login.userName': '用户名', + 'user.login.password': '密码', +- 'user.login.username.placeholder': '账户: admin', +- 'user.login.password.placeholder': '密码: admin', +- 'user.login.message-invalid-credentials': '账户或密码错误', ++ 'user.login.username.placeholder': '用户名', ++ 'user.login.password.placeholder': '密码', ++ 'user.login.message-invalid-credentials': '用户名或密码错误', + 'user.login.message-invalid-verification-code': '验证码错误', +- 'user.login.tab-login-credentials': '账户密码登录', ++ 'user.login.tab-login-credentials': '用户名密码登录', + 'user.login.tab-login-mobile': '手机号登录', + 'user.login.mobile.placeholder': '手机号', + 'user.login.mobile.verification-code.placeholder': '验证码', +@@ -28,7 +28,7 @@ export default { + 'user.register-result.view-mailbox': '查看邮箱', + 'user.email.required': '请输入邮箱地址!', + 'user.email.wrong-format': '邮箱地址格式错误!', +- 'user.userName.required': '请输入帐户名', ++ 'user.userName.required': '请输入用户名', + 'user.password.required': '请输入密码!', + 'user.password.twice.msg': '两次输入的密码不匹配!', + 'user.password.strength.msg': '密码强度不够 ', +diff --git a/aops-web/src/assets/Loading.gif b/aops-web/src/assets/Loading.gif +new file mode 100644 +index 0000000000000000000000000000000000000000..8761110666d2251ccec87353d5a2250235ad872e +GIT binary patch +literal 399082 +zcmeFZXHZj7+pwEP5_(WNr~y=plz^1bG!*G7HaZ$mdJ80wgqnmFdO#rbrqUGzg;11G +z6cChP0Tk3=p$IBAnk64TUzEcx~FbXHDL` +zNnCGl57E~Kfa==X+EO<3cXc^uXQ_Wc?eMc_&-iq&kB^PhBaSN{KF51SAVfvW+T1ZU +zHQ_xB1)$ph{Q1LsLHP0Ghx?gh08Q5S!Ju7M<3mG3!Y1tkhBsI5`12VLw%@v?t9?k^ +zW!fF7apsyfQR+{)p~ 
+z)vJP-k&&A7095S>8zbIC7#!UK&}^tkJmc){g)v2on|H~YH};o%h#a{MMU@H}wVkp% +zf;w#MAB5YFNktsFot}}k7gKD4u@OdPNLt?Ax*wd8#jwB_%c1@Ew9|r;@ +zQQ^BZNiuqI0KK~oj?Rh6^mE5CA-G6Nu-*1(2u%M*bSwp;+r)bwkxV1=-Vx7x`6-xC +z0H|h3%bs{@vaO}bkCEVoE?;TO+sGs7;x=Poou*ypox4z^+S>ZHzJR^^wNmIA5{7xY +z1}2Q$%OEtnyu5NB%EJ(?Ct!MqHx}w(WnyP#Bw;sERej|K!+94fJ`8JB-_R&+Gmu9* +zx!mn1Zd%KyS-d?SDrs}S^=8|Rme%a36LRQy37zQuM^alEE&$Cs0j&%XOdn8}rFbM6 +zs97VY8_w$LmedLs(5;lVx+7-J*4Nj!G1s*+H{v(yKpsTxH;sd$TV$>70MzUF(4B6M +z7)8`65%iV4Mv0P^T~fBAywT8ImTWtw^ +z-B^I;HQs12@<=j3rx$r7H8Gy_do&oLbN$y?=cp*t4!J&8~lBuHCr7&+Fqn+iy +zo3#{?|42dzGF3GDy94>RPK&SzAEdq}O5-95rH?c))V$>7?RC-HTOFx`($UsJ>1t`~ +zYM@X?hYgK%b&>yiDehDkamm-{w58R*D%)8zRrDtia7J2ML?Tg>cvv$m!cR-v(9lo| +zrK6>zqp_1h15XYmc#$+h@k;+L!O{nRF(Lp*2nY*B{w>kVJ1ml5s<_kCfA%2+cl`K& +z6&#A!{6~*8FNTF^k-TtP+L|b>kdVI}`$uX#;k3{H)r|i!HU2Ca=c9Gn2Okz0anWZd +zxzfKg?;O1UeMNs0?zG0}WJJKuS@8!oo~U;EOfOWLSQx_Y|*F6V#BuV-)!ZDy`-p{;{5)Yd*`VP<8C +zGBi7^r;9@A9X^KE`FCF1P&~ma^rFwd>kind`(Js@|F?OKEFyfo2w@Rt!@`39Jq#TE +z!w6w`|1ccV!eZyFYa1d}oC88Hg%R0C|6Y|N6PTwYjnWnB +z;r+X}Z|1qLU%i~0eerx|dg|HJCyysN6OSH_k3AS28NPpSXmFsP-FNp+Z%=m@tFz0fR6~rg@uL$2L%TB`}z7@y6EkNy>Q;s!`;o*#o6iH*)yk4Ii5Um +z+`-DIvalml#4+L|8~rfFBNn@lB-nQ-P=w+2NjUJPAz$18T_cDEa!HMz70zI<@IX}SeF +zn7+rpqxpHe|C2_~v5xDrow&CT%I!OEyzC);`|xtCv*mRk1uVGNfz>)cNZ)Dl)XkL{NHAw7wX?; +zo2)Or&A~ub-!UvD1K#D@9Bg=(=b*p*?y{qm>ihh&P66)=Tznhe7kWf3zh`1oR2Paa +z6$UI6``0!slmvGyFO=f$t1gxirUDkrNec~&6|w8finxa4jum9rmL4>~x*(!*NBki=*SAgt}7fEFl)r-h^Gk#&D0%$*jPJuvHNUb9K +z^jSBpC4%H}Ax2JE97r57E~fV^xbtBLT)6r0uyhTholoUi_vvFA1xVhhfv2G`O*CZh +zFC0H~UlB3{(7B(J3PT7fZN3W?RWj#CpdG=xzhYuWG~}_zx0VuPuok;bTHgjsN-tG) +zyt_Jl;`Y0}bFEnjK~%=5M#;EX*DAZ+GiG<7N=37=Gz-}Ox!>S|xtLxoqnY2_a$s1# +z)j+iO{Q-3B%}Bl(q&sO1{eh0lm$>P8f5QkPIk;-#@rUR$19f +zC&CdFQ1*6d!4VSSsOS=6!YUaj0vv2Dg-vH9v{Z&6dZG}D=R7bpLLgR+d{sO{t8 +zofz+%01A-5Ig$J#w&Hi*F?vumI~P4yE4c2+Gx51s4Rxekla-@nK_Cj?F&(OG;1Y0u +z{$c70Jrxofp5vR9$PY-KvzTzEiF>?}tR_vF6d{rhG+$&&5MBsJhYR+aWq`a{_YTAZ +zZf}`YL2Phr^(4&kfO({tf{VF~3C@D+c>{RN1^|7rW*c}-Nu;xCU{^e%GtyEx$?5HL +z+nc8-_7DYF$B+}W*^v+BQM1dy1~_NOrFPtXB6v{+1e-xX6u8*L7a)qFzrLWF4MyA^ +zM*$aBferRo@kjnkCCP-KQpd0eu7 +ztN!@)fZYBUQ{C&gTYatv`q|GuD((#X4k_z*Kw8hvr7x>@ZcHXo)LET-qXntnKd(^U6WK)5mg*&(-J*B_G-ORO1g>O +z61!-BVf#E#`qRUf_$M(?q05?Nz +zhz*m$s-GP~so4^0B}-WES7E{rHkb6h@gCYp2neB05h*ffzL +z3Sa?eX9~hz(tuk?DzU+PfAz1N|G7QQULRIHq0I-B{y8&pqN6QQA@vO8=X1^xKKcCP +z9|HveB9Br!I^yEeG3fpmTuA`D7PT5uI{O?B@4RhBNscIWpB?lC!k=Cbj-#;o!6WNE +zm8PrvIXy4P9Ln8|J*#K@xv!sv_Oto_teg?z&cdzr;V#_Jgr(?FB`&QA+-i5Nhyu`a +zxu<+^9^!Sl?6n4u&T1=-`%2sA@qXmQz<|4EaM*<00tmS=elq)0>7cxz)r{fb+bsM= +zV+91KYOCe){O1}$4A*Y41NxOqNVdK+Eh1$(X@*En2gi(Ftp@_v+{AagRKZRHEDu7& +z;MKc(U!O?mo_(zeYn1X_a9-5Y4L(mupZNW9@IGxqNdyEb`t@N@Q~!|}d$kW7|Kh}O +z)T?BTR4lrD?lQ3h3C+RXh}kn^#U6xQ<$whlo+{#KGtpnD#F$RgZ^yNaAAUVWK8VX`O|Pk5)i?<}J| +z$K_BYOwne%;j}l2YF9l0SraDT!?TGmzwKERU>@m%wUUcwdse<`vBYXxe`Z1##hNhV+=+1rIv|o2>cT^& +z>RJx>RMAKEWWygnw~Aic#oEnJy=mJ~BJoEFB6%W?@>g{BP{q&##9$*(8W +z&cKaP@umu*@|*nk>7=79k{O+3fzEmWjcxUhVA+uRHfcj0k!W=4V@V?aCanXO^nexi +zs3W^P6`-k-(+|ggsVB%dgF)|hi?PAmSixnLVA0f^Wt-GEW)6jzu}~D845P4@2w<5s +z#gTa7t{5rR@VA>ZF;?V9bSekMr$i3Ugwg1Jg4-Oh#E;!a9D(DOWmikR9x4KgDK%Q9l<8@Z_AWUZyNzTr4A*X(1d`E9CS9P=RY)Nh +zsJBKw^Jm_Z&MPnoqfi7#0OHwLK>l*JB`IIFs~}jWXjD0CeKA^r%v9URDyC-&*W*nL +znIZi7m(iJq(xtakGu3dg*A2@R8WP3o%J8I|jJk}O;f(tm +zIrz;&F)5*^TePJ#oGmwheS~tG48+_7dohA?ZOR5n1zR$qcZ-TGI*Y;eMfjqs?@l=@ +zqm-m!&{x&iAp@qWYt?sh@gCJ!$F9VNE#|O3IEP>S2nXIRnWi3)ueJz5$foTQ2u)^I +zmyG6RFpCy#a};E<(aw4JCDA(#5y>)XsjiiW)oPB|(ZW;!uQ+@c`NhRJ;A-VqqjY>) +zF+stmh#OEO?^GNwPv +z0|hc+UWGH6Z2?4K|0D&~dfxpj#XN(^b1>nk?W^Td%y%yNS2nNZ!t3D5bp$5t4lE(? 
+zL71ye$l)qJZHDpDo9S7A`mnkNo_m;euta +zgqAc+*(OWMhNt`IkxGIq9f6WnS*G);r(1(4i;drFD{?pElZK<5)2@H0y*?lXPbtah +z7YH_0u2A4FA<4jh$y5&uWcR|Zlq+B53oPD7flpG>>xqyo6mY6B1Uhq7wWyi5UJ@qT +zGRB|#d9+$+JL=GAab{zTo@}VETI>OIu2@&X??vGWHU3y>ak}2l03Vx +zjgG5AQUnRXP^mHe96|`s3uu}2FIJDaT0(C6pnvn^Xo`YiW0Pw6Qz*PvP2|`-SP#jk +zgl${uZmKFFkJIrwFRq+0Xcy|F7{uHVHYn1eHw-Mc>#NZfTyCgMrv!GjsSBo8K2O*N +zz<=5mTINPOJ!ljrmL%16XtAmdmf9>*+jzCN!|F=TrCkvYY^Zi^9)R7LKX}8WuCsix +zEMug2(l*<*G5Bz6oid`$(ksdo$p;X;-LptQ$Iy+MLKTc^Ax(t`Ur_2wS$F))uBgVe +zsWv_MaC^=d^6){n=+CHqjiRrRo!5xb7GJ{kFanJWjFy<{?n5cZWa|`ESs$PvjkO!O +zHn+}g_4ew=uF8hPdG=QX+^dD`vm3@}aAb#YV?6~0ZbC-gw-;}h1m9h&Q(6;wjbc_T +zXTR=gc#_f4wbJxrBZfKFS0;0JK&pMpiuJRD@_wweY_wfCFwK)Id>Gg7)*Ak}MBLJ< +z?<$)gi2_r0?&6XGmVm%&s85_z-~n~;`IG?FUzaY`0@Q;8{nkM$@`JJZ!dDLl=*JGA +zng@@sgD!Ii%MXGrIsWG3ei;n^1M&fdl0!zl0ahjf1`huAy#ptM{VY+!r<(`q>wXl9 +z2xtwGzzu{jL{0(Ezd3eZf+DCVd0`wQAS-$CQvp`p>7u05aMav+He)zkNGOpr{OPuL +zxWfg4!^kQx8A_2K_Gb^Lmt7p^j2u|^V#tpwMU8xo^kzDYWHyg}{UKC7H>`W#o8<`H +z!@Y<(cjdf$IyANi%-3uNc +z_HY|@7#lP39Q`%kanJ2+EBG1P^VKug7rhVX<{rNK^^hz7=#9yvcOH)xVjnG)J^Il5 +zXm#$#W?mSk1`B?M! +zW7M7}I;Kx_)jgl+$2~!pKQX%V#N_3ZBfp6K0WdK +zY11yme*nsV0LuRt018t?B-*7q3$hV`ka@TqlT|&s%YKPXg($Cz +zK}4gQF=n0bV<}XnUFbE6)Ji^T*a$Om3bgANrkY>Etln&NeCEM!YeeqS8hF><=0wPT +z7r(6AaA6Y4TE@}C{Dg>@hn#?D_7a0O;T-g_W6GliOf}`la_*Zv$*ZzPFn{s5Ku8#D +zFAIJE(`pGfPui^^>;$O>Lk`f9e*p@Z3KMOm_M1-Zt-7!7NaE#_+mgQqaw_}gA;>(2f0=l6+{nk$JN2fMGfA%(gjdAYD+% +zRAGS1Xr~zl!o-4vX$9Gq>eY}#tM82TflzdfHn4K_>TCr`5Dd-OrAGq%zSN}$x1mH2 +z=2;^w#SG7jTmc&3AbZmeq#)`OFSn@!VYF6?_cbNBd_7h-3?pJHA0(XC3RFA54}^Qr +zU~kcq!cQ^D2L6|G4&c}tm|S(@oH^)){Q(hEM_!U_EY>xNO*yzgfyB9Szy|&_*~YqN +z3oMp@1-(xsRRNS_znbL_%>N6B5nj#;_x^(MS +zE*y2%J8l-g_gHdNqCJ)i7Ro9#T%}MU;`0#(>U-wxII!bM6j%|v@Q`^S#jdCYD5g>8 +zq(FIAm|g|10SszPwe|-W1H=`Y?j4ZFCTd@03l2)B=$+=$k>w54GK$+p)UGHFdhJ+A%X6LroZ}$q{2uZM?dn(|}8al2C&d%_7 +zzg^hXANeV)dUp1y-A2dL0u6~M3{0nWu>FjJ88jA=s79y)u~>r=6f9-Ed*jYpmR2kb +zn0any6eu=o~$(AM@w-$&CymGAag&;>7vi5-CQIwr+We6xS^4oG$fpd4nL~+rBLJulH-T2|yA!!KDv&FqsaFh3b~gupgj@(|NO8sl^QuRy_trLxA^P!lXEA +z!+_o#>d{RzKqX*k|5=oo8^b +z0v!99)4zOVMz}ma-|TPsr%=L*NyQIIh5h^K7+=G&=cAS!or*8Le&;ryPdLR>K@ig= +zEiFUgxFg?-rE=d}wUBq|NVi{oo9V0P-u>`01%K6Gj}PYX_7it#66&-zK-HxNtc}E6 +zC_bG7K)4DXE&>|$PVyn(-hjHb)YjDFtKM=kGj;w{K;gqxU*n(8K^#nL&he1IN^>xo +z($P^_o^-~cX9u9HtAgesh~S@dNG!E;@Zl%y#s0a)SNfn|UqYhXe~O=>Z*-L-Q=?~p +z&eTy7?@kEA5_ml`qj1X(n8-&}>0jKDy%ePMpJ0Tg89zyydcO;>0(g2+^&k=>?3DY? +zH<8WH=B2Q1U&e&{OL@EpFfCASpH?MJJl@wyW4Zz!1xLRA#eJ0x>LqK!qX*|^Mi&g* +z6DL1kkuv7m`9NYe^cn<1ve-9% +zg#4uIQwY`C6QH$dbO)^ct$xw2J{G%2Cr +zy|VB2n%sL5vf0?<{!Iu51W6waR^obrPA^gUIGW-DWmT~FqW<>cKlrLy(+`DVu?xRn +zk?R&?-=u5Be*E|`M3NHEy?gM^=RNx(1HgY?9tf}; +zN?e@sm!WvB1;due5tPWU)`@^c(TzIN6p`dPg0E4Isvrs+t0&yri0!N+oLh>CB@*2? 
+z61_(7F32PcQi=&N35*72;UErRaWO~m$+f6QQz;g#81KzwpUu$o{;6o5A0$&HWtJGX +zB^lXINy_R>G4Z3Lh>^rP`rXAy?fQ@!KZx5VVN*Yyqn`-#gGji5eZb;>VG1(l(ZRTL +zOq@#*JsTcwR+laTOSd2ioI+ABEs-DirY$cLI8|x7sTrXxd@m{8DK(D08GLz$R6tJ| +z8wtI!15*(3M-)Wm{4>V&X#`lpw10%C4bfLR=_x&!P9)M0a1JhRl%6=D3=^y)aL`Ey +zX6Rp3LWfw{;Re(RW{wGrT!#kPyEq*Y1Gc7$gr{a#NHM%!Qo*)izWxjlfb!8Lg)~Eh +zKPMTC;J4SPdCIg68yedh52(+bW|F+=5pv5R0=!YYT0;a0AZ&^?KQh15E5NJP;N_{j +zPb``pGmBurw^tRQNy;3Qrk)}uo%PGhA5KFvDe0-SWpt_rov#+2XhlyW5c5il(spoJ%ZLhw%sg$F6k{9y +zv?2b^R;-b8+^}uz71b0{F(co)0_|5}N-7{20Mz~ga&W4G+~tfnqa_n{Rr_75@6MEg +zm0?1S@#-UZ$>kX7u5yL0*n2i5FPE#gor*BSIRcxpYsx~b*5acI`F|)dsBEOFYO!oL +z`G9N6)+mnLdiic0(8Q)Dk6fcsRC9(*w5I2e+mdaJl5`%#=%xdTIeZe_Lb2!IX7&}o +z;(|O?{7KtNpJ<}so}aI-bRYvXf>v;4kA_cF{M_?E<7!u2HQoB?2c_pOBq{^nHt5-q~TvYr?m%E +zXQIJw2(Zli-R(2*G_}Ux4aB0(42}BQbJ4_WA2RA*L^QZDOYOoPmdTZkrEr1pQB)&% +zx(TyU*=F1D!=Pk)jb8!_d?TH^10^n{h+E>Kh{{!mYfQ_UM+5S~)wPk@Wd((6f8i9;K;y37U(R?+*$Frnh= +z-@K8le_TTkH-$N>l_c1PpLs#j_pklQx~esMLrA4v#_;A~Q7MJf_y!q2YXnkvPtfck +zhc_1GqRU4G2%=*+f!VYZ-DFd_inCkwWV>6&@Kl?i5Nczb!=aMddQl3X75gEnWikEZ +zdQ-Jj^N3-U5xLeN=9WQM5mK(x_t3R!qomi}bsruS#oNb9$VFaJE*3BZCq;vljl~0} +z!KWQt&;2CCX2kjgRqM*Iexh0YvmF2fz+if;!b$_Nn^iu;TGWq}BR3@&UCwl^%3@}2 +zbA=x?CGla3%sSlyj{K7{m8ZXu5!5sjC?lz#$rQ@5pS%w +zIfEe4)BYvuh6O3{FFnCV-Ln_Jy+cnZ^|GCXR(^NBOclLsxFbGXU!ayXE7SFHq1>Lo +z0aS8xTT)1FCF6oI&MO1|Q=si_M*7h)+>#*Pjse+U8sx?af8Ho=Zgr`UeVs%BHDdkh +zQUY}_klm=jt4==7DFOBOeRQfojP-%DQvpQ%!Himf7S;!??nChPGjSMHUmY;}1=8^t +z(wnaR?+h1sv=3kBz!l3iUr@G8nTkbW+Gir4?R`1e{!hwORp| +z5fVEXVfh<*w*Nq+RxJ)!(q@5Ic}&*!|eV=s&YMk|5=njNFczN6QI0aqCp_c-#Iu3tPS +z=62;aIGEviE7G&m$AhKr-ks@rXLYPk$fG~jO$h5b*zCT;P)5hy#>U4V#*R;93Qab< +z^?HmyYjz9XAt_u>?hn`h!vIC^|1m&u-1F?T>9ez*&l>+jQvRc&{GV4*T4R0yF!Vi! +z!PFcj1>JeNk_o)}zU-k)@F!r=On8P8KaPt5>YJ=lgdNY;q(RPN*%p#=IQv9JcLqL9 +z)qu`5<6qI`03hZQ^ZiE2;tCJ;O1p@i=tz3!m?$SYbjFREkG!uScXwVF?@9EoMv~(HQoWFp10u8~dpBjWcEV&Gq^I?Q;uewW}r-98> +z+SG;3E!#%rFF!fa^U-LIme>~l6q^VeFldRKM|jYHfDtR)*si^FZtu6j=C=d-QS$aw +zgffO^xi%~K*$gO6@eWoo69>j_&shNa`OO_MtDDT3lf3mo6`(i@C+Zyu2J>NG@jB}6 +z?4F@Q1_Qxzy#Oj-ZP*?WHpMW>2ecQ5p-K{7$%i_E9pHlUX)0_ZbCxNbqyTh6nYD`F +zOx<^uk^FFXzr1BX_yf>h^ejtWgjc>u6*?m9;>1dV-!ibA=Wm4$gC%#*Q6LBP!=qMO +zsY=p+NlI0^^HopqUIgf+?W8cG)m!?I!Tp0pU!2cC-pblDk6EjWq%kQ;vWqG#WvegF +zn5Rnn4mn#2$$xRq^f%uZiXLHoWWapEl4#%R0V{z=$*6pVR)4OUXw^g>Lwa641*Lw- +zfstF41c7R^v%QW1z>WX~73LCVfmpmc_BY_^YJdBhqC$zKDkgY7!GH@7wBk#C}9Bh +zRILIHHv5J50{CDoszCdUx!jQ1N7Cx~gY&7-RU~*%6o&5-X}x^}OYyh1fAP3v>Q>e9>w(U4FP`kEC^bIU +zgU<=hzTg$E!-a~jhq?#NzFLUtZ2u7wMwRoJpDJXv`2a$*e!9K8;nX!AqKRv01sgG_ +zdvxms_;t96(r2r|66||>oXpOD-H}fIt)jGT2w_;}HifRtw8ni|@%mFS +zFQa(lv&0P!ch|~>&{f7-(q~GB0HI1id(8~`;e|LJfrReaOxy=qh!YT=`T4?@vvD_d$6z`8^YsHLQ1Fo{F1`{Z +zxN1d`>eU8&jnD+|(#-(9Y2v#PRnR7tueZ$*LKh4?Uf_5bbUQdcB3{nqjoS1NamcH$ +z7X(2IYA`c#=~odKD`|Y%Cp&g?U*c&>#!Eeqh?B`L!>)cVd2he54iMQ(DF4>G^dbfQ +z*bE6z$izbaswjB@|58zs48DhdG*L1A@W;RV2_N7`eS-1aiAuwVa({xZJMRHHf9~oo +z7ona1T>jabxIOz(GZi~-I*ZHcIU0KRNBO?^*L5g!aa=a;#^5ZRR74Sv#sb?!lR?T$ +z)P0VopYCbhc}*sn=qe@<2ef{D{rbsxktYm-NCP*P|7?BYg=~%}D}Avc>VK5uk+^ko +zb4i^hbFjW2#Nil7^HM)QT2p-AS%r_mQk}f7b+0$)JLMqJm3PGu*gxTiOxE-^QeR(u +z6ghe_K>#nBB476U0HG#a6lI+OKAh;@wQbAE9cm#`B|*iR`khZ}}> +zCfZvkqtP^XG_G$U*(?fHI*v#u_@Lut5iuv2A(w4}iONY+zLC{7I8p2L@da|-2;sXvF_cKQ +zLQwS>2AET##*V;dVn~WVkyDp^T{^hWIqjTsoH3l>Zk^PU8f(@`b(yBxOlPzhkQ-US +zweX}Nn*^qG_Oy`Ru!bv}|GyrV(rzc~Qf{Cr}0N?t?g3h7~Ho +za5#5^nNT{C<@`JoMTe0%aPiIE#uULyY{>o1XwDkF+<@?XNt9D{dEK{=w47_^T=1l` +zP`fV0#X0$0YQCOrVYzZqi*=EOBk)h2B2$EO;n0Ot^3+lxKze1Vwss#*%MgN`a(slGOR+S@|FPrj)?o#Mbo!U5U`Db 
+zz6ga2qzc%Pqz!QoM#~~>nLYQ@F{JA2#Nt0iC61$v!}PrS%o?@nYWm$V#{Nff3qE@ywd@pKlZ)JWl%%iq4?5DE;^qNz$c%27C +zR54}IuzV{uCu;G^$HIbJ8xc;@)%O+(akjbf*j(dulEw>W@KQlaLm@u8He`mJ?TRm1 +zEKpS`^CuUBWC>la__xoC0=sLU^Q7{RcxAh-@rMhfUNQ +zCg!3cu^~N@`#cZIA4+j!%q&s7Kjb^h-jKJu!AQgXBu7|PL2?{{xofEyFY1ns(*kRZ +zExA$8YCf8N4PtX6pJ&bZDUkYQiEufc+#4NYGD_4Oi-`+NObARnRFbb%#b@Ipe#8-c +z8U+%Ji3mm$kBuekG*P?U@IL~>mSF7;%8c`Y4C9!~GVXch(Zv=&!P3G5v;cbi3JNrw5$1 +z2y1B>7Tjiol__l&|4>o*jBo$$yum1H$Eg%3H*~;%hHXT%_O0AbA0y7C;j#s91{mMW +zE5+vvrdnaUKBab<8nkJxL9Y5mTh9NrQ7$k;K@D`cLCzl4(tAd%7`t286?~X+M2l+u +z*Yp}e-geIOm5}%8?Uw|5-%hoE)d%%f33;dX9uo`F<_aGA85ydYezCa1wx_%}ASLN% +zdBE3i~j7Idm(M&H>5}U$>sWi1Fn= +z%`H4#8&EbC;I?CzpnxRFz*8N5T1as2RKWSE0Zk8|oTvenSpS0i0|q8OPg(;GI}Aqo +z4jwfDYMTt@9Rxc-1Fgpg40{J{3w;WGhYaM0@(f8y}4@7ehMap<#1&*wh>KO`y5{~;;=Au0bSNs7Y(z6EyG2+V0_at)TA +zG?2~z_H?Vcr1?&;i3GpLn=c2H9Va1BlURVA(W}P@imaJ@zZp=JML7;$8K0!Uljj^M +zBK%JoHu7@*K_Dgfr%0f@*E1x6Zo0{wmgjry~T}%VL`$9IAvuOrx<#Gl9KGZHL#4E5At(`E+k4 +z8&6Cn$e*-Fm}xb^7x&UR-7CGrkE|3I +zROF}%CF4}c=Mnq!1ER-KxvOZCGQ>%6kMipnpc#r5EZ;tdvi>pElRYFKiUm+^u41b! +z_~$!!OcYJZWXjF(r>AYeAA2qg?2;y#u7W-s>VNW8>IhuCMlu}f1n71Kt%-3Wg%QUT +zMNsN`9v~1nw(7?M7A$0maap!W5;5UoglRW`s#pv84`R(R<2CTj!__|+9Dqo0Kd#T) +za<@RZiHzF>cn83sd9)wY-Ifnirw*9`11X8v=Vl1D +zkE1PHpCSb`qr_+i^~uurU940q#3F=A0O>Y_`axEIK$wuHM2ocgArHlLGNgQ0A|M^R +zOcfZ!T|_=xpdNr!%lAK?7e%&?+)b+jhK}@`&?K^D#j$$|-pL7^v!P;7VFg3iFe)4R +zsW1o-I=Bsx_|*B})^!V*8DqG0C_DpH-&V-L(4?~0fZn+nVScQ;M(Qch87!9XUOE6O +zz+pS?m?{<6#Bb^6LN8*KKS|0aBA +zW&oXcj0Xbke*@efdl4*)q`Ffsy223VyF(d0s^{BNI+2#`k8=|b&LD{gZoT@6bUdQ2 +z9|SbSC8>`44up4ecXe|1-tuoXG+&;5-*sn2RQ3S6l148RX#K~v7;9;%w}m;Uov#>Q-3wE1AyRaZ<26u}j(QWy%y<0K#im@FRSw +z@)w%nPs8!^2hB@V!9bIF(LHn^kT#tk4dITu74-{o=Cd^6 +z+`mCR0NPGa&*t0boa~NUb;2Ldu+5&&Jc??&j1%xX^Kd*04SvN=L`~S*4h8? +zlfSW?;QJ%Qjvqh}WOH!#qc5@36%j^0RQl=!9CG*D?~kF%{jV2O2HE_3Lk-^n=U;^O +z56EbKJR^ibAcpnfHLaoPsXxU}Ep>MEaD}}R%mur6A^^bxZxHAyR^sd<7C;vjo^$l4 +zxGcl4<3Q*)oG4{(o3nNEnI}GnDEG?zXvdw5(C@1If6tuokD90p`B=Blef~@fOP=v!fQLc$ym1lz`LOd)x^Ce~U5Q+6>t73;{??Y?jR@wBmS +zy%r%_>hAYC%=YG3=;oO<_b(tOnZu5CQjSbu3K+tOT~4XOe;JNBxZBA7 +zYZD3cizGWoN36vgbta3^!*hodHa8M%Q)4STQ%=?;PX2?ZToeBXPr(vpm~klvfVL;rBPc1bI9q4hF&5cLDm_Jt +zWbGe1V3Vds61b;A@Gl~da?_1q850LHvRc6(rgwiu(7NeKmi0S9H<8h-B+pcU-AMZA +zOcHb?9S2C&fn}+oL!CCVeKs?u{X>7$X8DX{c`hbhTg+@3At)h|yT9)iL&Pir!lOpg +z)l~@JT%zB=!}(RypG|>a4Iz1*IlZvl{Wf_MQxxUmkY(R=D?diAGF*U1P8ZrDkgM{J +zjpXtCqyEY%I8n?$ESxz%8 +zA{=GWyRD#QT^DvQV_IQJGRIg%74Khon)Q +z79j<$DJf~h4nLBJL00ZE>Cak`hCk$jYKrYfRtc-{ol8ok3L(8HY&RBa{ZC5;8Ftn+ +ziCG-p%%5!{O&(A!2$n7yCdZA+P_uZXxL9&(i%h{G+cK&uql;O}=_or65SMZ+f%BL4 +z8J5DG%j!1EWjf23pO;rEXMNl%WlNLm`QwF)i+$kvGBAeENX1)g2DhW^A(=@|%lp$n +zo3F0`swHhp7YPZ3i$td*?5Mi}30rj~BZWzQuH}eDMggmMLb~W^UCl^T&69=VD%Fys +z)coz%{ENlaYSJ}pofYp=5(T2uEkA@Q1riQK$D!4Vca!4KGDVu{IFwtGjvC>3V4;-X +z6;Nw<|6(}am5B+@jc4ROD7;qERcOmC$mXx5KQDPM6Q^sJYGfUDyelhS6~Eq<>QpDX +z9YE+DExr(tRkK*S^HpP!=a+xm=w+Wgv32`qfmt2d6MjI=Zsz+ps=z*EnUW6*)vX^IeCvQTQaKyyGLXd1V +zwYVm3sl3dfIn}5_#=2?9t$8vq*1Wq)-YCwx@dhNi8TJ6zrowkiMf@Bo`6d$J869=f +zsO8iL(&a6}VszB6V)_r+&{;bwu!pf#kDn$}?A5{q?Qx@=R>iH>t*An>bm`W5F%?ZW +z1JV*u{7R*5$~<^+AGu|#tKxHDjg(x<8Kai8u39>*9X#JFv!5jJOlpvzr2Q2_neYUwau?7<8yjH~qT1eMhb|-r##137y?hXMqhUp*Kh`3MS&868iz|dN=VIxG3lPH +zgnKdgLS9jN#i7!lZqcO%P3nNUO{rU1v)z6h-Cq|vxK?+doT!N)@w+>I3Y+h#F$ixM +zizGHXj>UAxgw6&L9A;^sO551{1xUBMUegp6Qhlai_>+L*Al1G}eP*R$uBy}x+sR8O +zcVG&NnUNAmstt7Afhko2Web6nbN?EiYQIgP=uf~DvMetTf0MCyBEGm7xqXDI=c<^Fjqd1|0XFG+2?G`S5-W521rWl +z4cGA}E^qd@-i&a))#Gx<$fZ@owSBA0-4>Uwq@HeD*ZV(RdwpD*TY4UB?d|#2Gkg~@ +zb>-BDEa(6JQP0l*MJMI6#=oaM|8ArFH?>i8i#BriOH=}`yB$zpqtRwb_{<9jNuTi9 
+zA%#1ZPl<=|*NZzf-^>aDHAQ1=o8js83LWTTrP&x2Y=_3K^?6FQ>=iX7M(eGoVXCGH +zW9>aCkFJY^X_1PrKhz=(FdnR^DS9}^*1j9^KI2@Syvm~xmRwcBatSt+f_d +zZ-K6_@tM)UZkf~C#^0a25k8uzYVejXnfDLmk`G`FNOq7H+*ISF!2=rdY`y-J_KvOa +z2xHq7Gve{b14Qs^<6YJ>%UQa6ZDV +zvN)n0#cju0unZ0lJKi80!p4Kz(k--hk28^ex*?V%7VI%lL1|)X*oj}FTPK+t>~tLy +zN)N2%>F%G+Vnhp2C)Ae@;UO~Lu|YC)lHwm80!Et~X}!V^a4aM6j;1=UN>}Iv6tE6D +zfl`*!0O|cz-j_(0KB1+7-@nUU7Lvy``_8x#yW#qSquH$w?rSlwDMhL3_MM$oQ8rQM`Vrr^L^KrBgQSxp_L^P^^2 +z6nz|FKm;QmlTc^e6CqWeF<6D#WEi(t!LEiWqiF}zA&O;uC@{GX!sH`iiHPSYlEz~O +z(*-~-d&G>j6bw%VMW +zqw-tsCs&0QC7ya)PP^6j>!0n81U+mMMVtHe<)@5*C +zs^)G$UV&I04(rdbYrV4Tqo+~D*gzrThV&_h*Ab)|{1GvveEx$6so~j^;J6&w|9xoGcNpxyn=;71nq#lLXwVPa#edkt6xm9K=r*J{lddX)QGYBIuFhS=KUCb +zvo{v;dG;gq=1sT=OK6@@U7Gkt^x6ETba=*P>q$^>PS&_5C|Rz4#Z?z}dRWlbC6 +z0xcWc-bG@FMAw&AyoHYG)hKP((dS2J=Q}62`N6P1Uq4?)fR)Ymeh%u&gKK}Hs%^=7ui;9bHjeWuVFvh-&nr{pp5u)Nvh!sdv4Q1 +zxl>7-HQ}rF +z;Kdgb&p@QU3KhL(&7Z+}bM>MhAKg-KzOeXfrcxC7Hsj^O{qY$X+i>qEk!9PfFW8y> +z$&=7T6;R7TxnwZ0=#$E1gZ$2Yb96bQ&#ucx?fa_a{6VkK#x>%xSW~9rSH`MEQus6V +z>Tn6?*30V-jUU}U&6eGNK5fnjeHK(5{$TsZ|D*u@+wHC;31K$%sJ1bO(gu#83bNLV99B#fNlz~?6E<{+7) +z48{xsatMo(YNWj2Q_ZxaEgR^zqVV4oMw&&4x@Dk@Cv#>aN=+wvwNCKTZ1_JrAO@yf +z?B_*bOn9*5=#l0Pp=lgYop!V=E~Isq^_(BJrZn8Li}g`CaMM&UsWf;qjh!+dcx1{K +zCtD~AaS1U +zG8JJ@V7cHpCk27nDb8DW*zj@#T~)#bYsXn5k=D8pz+FlCrUSwhllgiQ5R)Tk)oG(T*tA@;RJ;^)b*_#xj%Q^_AyLI!oB +zM@reXjj?xeAw8TV0Iw`H0C)xJ%EwbEOGtyx=#Me(UVwavv)KX^kj`EbzApKFokxrG4?F +zt#;zcEH4I(>!hz&rS5HvousjDScVSNr05_S1)O9AIt7~}9f(h`cZhoP$5@#M0T1WF +z5F!1NCtC@fG;5i$3mtRFGL4Cqp418PZ;U<6&mO7gg5PB~^O#b@Y^G(Jf-2QpKMSx_ +z^!~O~7_lE0LY0VNYn`aC2-(*hv++Y2vBb<=4mS8K(Taa(ZHm +ze&~N9DZy4&*)KVK-lpJ4$NVx*{&}Pfo0xEGS)W!imHi2s@n$jYS&krxkkYA>qv!z} +z_oBUa^6UNMq*TQRrg;`@(B}@v2ac45&se1@h5A246+U#h*vPpUB)VwYkO6lRS`R`( +z(3#c9EL2X~m#6vtXaQ?60Q#QZP0rVuEC`yEyQxb3@GfN9JPH|>M(rxv;gxAR$+A`r +zt3$_v69SKvCHQX2%%J6#HRbM0%euvnTDLh}ZFBHGXC5k-s&FAB5q;?`*WXl)?`6UN +ztxBbk^IQac`%qyZH&%e8&yk}rlk@l4)d8S=U^;ZSnPGpBrk +z>+-C4|4@!Br}&g8Kdej$r(+_PHwWKd@TZG0C(ueihEn0$+;W`~YX{LAufT(00po~V +zw}k9VWu;Oyk^6h$eqyxWCeR^#X}Cx68crEpzYyhASh18xoiAOcrthc$^)^CR=?WIj +zD}rd*=-f-q34xtqrN(OEaj47pmjYUxdF^GvHqN|jVF6Z@qH>^E)f}alz#~}s3%IE! 
+z58@9qWD75p?)R*yLzubR)C(#56PZ=E%6^VQ~}WJBv>`1GY6 +z7}{>@i)-%}!j{oxg3|c?%oxDQF>3TZoLjszf#-FYOqmq5d0iY{@>jHr^Jt6)noA5}gh^r~3PgKiK(m~_L|(e;SRM?bmpt~oXg8hA +zcBW7a*tr*IYv*gI&q`I0j8kQt#D*Ia(!(HmM5Wk4V5bPNo^AAs&#Wy=pyl~z +zOI;DTMCPhzK)N}3-37F=%=Hz)OXZ=`_mayE*qPx0a;k-NUGNcXL_6{(Ojin_3t1;j +zySnV$(7c7E6kmRFu_cob9b3_Sqv2|7Sj|LJ%Z3v}%bn~ottGfp<$-&IF=|*@~B7F8pdz%T{bt^$T#h!az +z+ZAUa88zO(q6fezLDY7AqYh81r$4o$5#fE!{H~cT?6CA*o*vkw=kCc$jAQ=Y?PDIU +zTiY*f^n5Wb2ly#H6F&Aur+*%Y-^L=`$Ifp5E`6Z$93l>(^ZlG$r?d(}Qb9f&vrLl6 +z>0vlY5~w*Qrbi<6T{>R68@f@FIZBR8>aGC6GFrM3AXsXW1g(58GYN4eNz%`_m(+5u +zqUYSDv3t_y7+&SMP+0!E#b-kAlV3d>n +zKCS>pd1T~jVb|Li(c3fHGidAj{6p`+UBuV(rx%X9{5P~wO#T-!3NYKP{*9r`IsGPB_*aShWXce*ksCG3@|sBiB1c%M?~?0nylM4^pyI +z8e{12&5l6j!hGxwH-M%BY*2nmW2Vrp4IctAc3eO;u_XtR>c7G4;5dVyxc +zY@NL+`Y$CONZ+NyE#(GCm&sH=+p~j%YR6vvHsD0CgDA*R24INOJ-fCMjeL%pp=EqN +z){kfbIt$K#c)03-cw%xxHy71=m{+-eano3yqY9$>PImH{ZySk>XWJHC%H@RL*yatAWW05zI +z{Wp*$usQg(30q^NLK{KO1g&wbwyic`cdZrlI>A32dhx54S$ju`B3mcOv-+3r1||D; +z>|KQJeYt5tb>t`?)!J0Eadylc+H?QqPYpd3L^duq2Cvi#9saWB9ey?rHst1b!4d>p +z5|PG3$>;`4i<-7H6fzUfT$Qf@+E3z=uL40?AsOY(fo;zh%7}|WihqH@@|CmEK%k`N +zp*kqjG*S2;33pRmDCE_`;B_!OC4L*D+A%>)0vlluZO;}8gd}D32VE)kn!2QR +z$1Ys1aJl{5YCMum2kqhu4j*2RM;$);)F^Na6%INmnDhq0)>9JC?%cB4wzO3N&$EPA +zoh@zX^ +zr7;2 +zf#33W&!>O~KV0=bFl5h>yMC3JvZNRAh~u*daL%;}f6FNVDIqfesv<@mnq~HrFz&F0 +zR=L}4I!dt|J^#nOr+II#oBG-cn`Gi#Ow?V6f_vHiLj*JDEHaV}GCXU$EKAO^H!im~Cc +zkS#AUMPN{XoMP*~14zr7ky>F-dnWX5E-v@_n1eA~y9t9q)!uSO-NpH>lPW`t{bw$+ +zF%N|k#_G+zI+igQvGkPTeX=|!LZUS4UQD6r&)K_2M(Ch2tqAt~&UEg?R8yD;f%zS3*Z@B2rQ1_3Z1BtZe$pS!R +zX??z+4AOXW@Kb1^7WR;Yc2h+D81?9f5|l)0gzxp;sx&(Gu3k?{xsL*#g2E2z5;}hM +z&Lr5lK!3his6Sl9^q{q>?^+hLYF`I^0GaLj(^0Wx`WTVUf>^T)*DuXd(o%maDGqY_ +z3JT_UKSoEaa7%SD@}DjRjl8gqrYZ=>zoy+^i?bI|`zxY+W4Ek^p>$afBLA8zl@gqqJ#ub>qDJ9E*7%sJ^ngGrevUy9tXSE$C8w^xnfb;#Fq*>`^4dcHU8 +z;r+E^ub*-VpgFdtBl*+*A~8BloFI=g`4&64IDY)j(k8~manI_=J4KoyB{s`r8wfup +z&VCBTqE0S8xgE9K@rxN-DLVCY%02Sw>D1p}`}e*dIUR|wx%lZ1RUu6SR0X;TxBS^0 +z3-8yaOm;Z3n6do8HWc0tUV%Y +z`O=?6!E;F30EmKH@L3Z7i>FY0SRTIiybzW+fBbV~wjsYYo(y*pW7LAZzabP~>ic{?5`shl^#*lp1R(x1X`x4OArHDD(;e6+ +zr(zu}!m*ao)*^Prq@4aF+pdv%em>;DWL%C1JE}C+fWQox@(m-#K6nZ*Tm~ip!f2p5 +z@{5DKKn(B^P+69o&zx}Ud1?iLbJ~Iu&y8^6z>Op!q%Q8m3HlXJUw>k-kj|zOgXu#t +zY!QHn(D`=IdF+a-lB^54FcfyWD{7R+a&b)Hcd=%>l0=knCkNkAi>OMRFPWd<>XBqU +z7*`=mGC(4`?Kn;rlqZ6SRkQ#{fa%mgw*!`nUCcURl)Vajz>}-VXGL*?264Vik`NFf +zIZ_g0P7aG}h^d~6apJ_j6of82GHr>;O7!5DPgDD|lh*;MUc4Mh9&Aj8(SZo)ftQMCtKo3!V=YuCDRCvIqrUDuoQ&n&wMtt7t(%|Es88P%!D^&8Je02 +zvP0~OmV%D~vMFx5kS1Pe-4v(TlYUG!c2`-DYh&0?3Fnk*76b%w8;&8A1+gU|^0*ML +zjv%}=NM$ffeIe_fLo8nZkCwue_TV98-=(`OW_Xp+y-S0S@}#R9xu-dBs1uXK5AhVG +zM-63a^7$!Fi3T1ip>llJYM+QJ=xwN!&s8MpP%t8JO92uAIQegbc*CX +zvYG|S==dT{5i?3vh!O_aPenX&NcTp{j0sYEhBDldnNfn=oOi_up|LyP%jFH{tf)c4 +z@J-f~E>2?fFkSgQCs8%bpgiHNhj8zEj?Vi4HD~UnCiX>ClvZw(Xy9@Yho_FavT9H; +z^gTOZIL&wrIW<5(x;al!78D(4<}9M-Ndu&9V~yu=SHHOwv5oG+srqyy6) +zdr^VwWNzI2@w}Q+B@Auz1T)lxJeDZ +zeI&FlC;qjhls?>SOpFF!xH_r_Nz0A4ctA6z)W5(7XW69Dr~Sez=n;82Wh0@5BYs5{ +z{7c@6(bKUWWx*esnO`2biHgLf#$rIQFT|->hsq1<9 +zX@?@u+cLky#Hiy$#N9`f4*Li%Aj0A>de`SWq_pI2Z3)D7^sWxb+o|#bxgK?Vw4+J6 +zGZ}F=_e7UuVgx0B|>|W96*$Rzw*Ymqc*!%lB +zr4eKI{#IA)_0AvPxa#_O7eHP4a2NFc#;M*>*M6gu4~$%U_VnI5-upDR_vV$}wvgV@ +zO4k?Cu0S^Bb<4@|t$mX~batyNAlQ7~;PQW>IBWM`qbjF0hQz;40p +z@2-7IcadA?(i3O}-nX*nk;xnt8)#;Z#mX3bU@}*I@NtnN%89j9}q +z?fPjl{$BUlu7i4gn<$i!rvp1#OxbDDGPZt{5hG)Q*83W0qijLA2_Br@Jxd#7tX%Z* +z_f&u>PUzaSEBnlj6W<8GNMDy3nXPHP7SQvVgihsXHonpPDK^K7)`j8@qP|o`p!c!KCdQr4CVetN*Q+9?G?navYXxLGEA${|dz*mWz?97=`qxC>x<;(OF%ra|?Q9Z|_S>obgm1A3MAA>MshQ-r7JOn+|>Hwy&WuC6R 
+zyUFxS{Ou!VsQRJ118di+lfJH6cU^LKr>S3D+JdQKArVP1onfb-VyojE%?uG$;F404 +z&aD!t?1zEWfV>#s(ogG2d!dZ>EDIrzzH#ka5!xG+jap-8VQtzA)J+cbL~Tel^DIuj +z_+i+<6%?n3s*p*uWIj65z3X$%VaHdcEZL-~ZnsP!>bPY6FYy5K=h}yiO+<*IkWd-C +z{!FCY^>qpoTH-T#8})_(6Lh+##pw&F@)Bn#LZjy3^xYbtJWvcg^BY(zo1AW;fq|EE>^ +z^m}Chw@6rFQ~F!CnX_&5sPiize_}Qcm!o9RrUm=K8bxvA_V;1FXQ4_@WkUx6w);yKA3)Idd@%hU$1MB|Fr<4MKwW+LKX6l#VV}hD_#_O4x>3hRxAgki8uk +z>I-sf@D!q?U`1&D0CSUYwnj@y4E7^K_3LNB-cgCL86kTWox#LE79vb^D%GxmFnQVc +zVNyC4z_$W}<3d450pFc)f4DE~d!10`>i-jD*?|5t23r2WkdY<>w|xFlS7iR$8Jst= +zqOM2|R8jahM_1I9yXUKdqotm|-%M+Mbfb!TDe>e4psqamSVf~rotjD*ya_;-(Ac~e +zudn?*2J*D~XAH#J-bO|o!HGd0^v-|O6|n=UJ^EkjiY{oEtgRacxaA|)R};5vh$0_b +zXouZ~=<3;?L8U@E@VDDG|8#k!PzaXM`8~TzVf2hj#+kMq^|Lw`M$eE-iCqZg9jR6k +z2>ef+1mQl%Q?bSDVz;W8tUwmK<=<;)lwo&7^#Q#C(iB?s>n?2E)@>Mb=FCrn}wLW +zsP#O;w-u}_nA~&v*+VzYT_VGS?7h)y13IO$puR3(41~Y$I`$3K{6Q`{AE>5)n4l8itdGmhXLv44{?LODe&Ihx#%8{6 +zI@AjBI{jnx$x|c>xRT??`SE;Qlcq3m@Y97!6WN)#hTC_qGQ)njfUG3*x4*4i0~&uh +z1+-K;jb|f+)TJiOiS_a|AL9%oF!FrjjhF|1iHaJsYWoNpu$ejkE#s>@ma(1M*RdZ! +zy8l3yR&OI*>&U1%Nm82(_u&70{szI^OWo_M9sF~0Y%g3Af5=ycr}3_DhVV%AoF!U! +zYwVDuv2#@)r8Mz}lA~a$IdbNkd+JU2Q5BNz>`7|C_Q>~O|C_^avC{NvW)PXV4S1@8j*hEE@Tlpz@vA(h7r?R6+D&6+bXg0|6+Me6Lsx)-lR +zq#uvft3%5UM}EF0GMqRp|FdJi%Ln^~mcg!VOGTeX7p@(?z5IUoukgwC-wQH@znVP8 +zIGXvwG}Ac~naiM`myrci@2RYJd-i?0Zg}#`!}*9Q0_D9$FkTn_3>l(t5wVPb7!w#U +zS~S8l7;8!SMF?4Thf>>rA@z*>krXCC_TKpDN +zz)=q9heOP(hKM%}oGhIL;$)nyN`j_k!g9$=V%2MFC3;*;TVfq_uK73yDOe%udT- +zRmT{+g~(0;J(d`9oRHLq@QtJUUGzvD6|o<2*-s?F9|iPmLgc;Dctd{VPfyqYE^rqm +z_A_YZ;)^*bp4gCLh(vnhB8_pelm)uEF8ixz>O7a{BI1oB<7_8cwo}pTLosSKOtV4A +z0EM?m_x-UfNVnz4E)76ew3Go<&jfXNDBejs<8lKfcRm@xW#kf4Z8`8YPJypW(qC%F +z?ct`a^@wwrPcbBLKkK9;78nNI!8v$TvK?y$tkn3J<-~1oN;)%`Y&pa;LuQW +zxO76OZ=_R9t7UM8B*Du9?rHN%XLBK2H@ac4Z>2#i)%UQdnCL*E8j6r0xO;p3KC+bvev4kj?6RH +z8u;qRCv?Y0;~81;4#lX+;8g%s!sqOh&$On<^xzVan3$!-;zuP#PVtee=9A8JrC;kR +zTFWUuxp;94aB%cJ%rm*IoN>3jU_Idv +zQk+yw0q}}Y_#`ak$mW1S$EXv|5w{Z9_Yx>&i{*XIG5f;`lZGR0MVI6GRSFhYQbk}s +zA$`$(MGb4x`4?Luh5amN42)Il=ZZciP(C$BDoy)OJI9Pzhedft<_?GYG)ETkuh=zT +zuJEe$;_&HXS1rU`;{X-_RiMi*Z2u*CH&*m*W4Xkl=&nlb)8^XWh?-+%V3kD5*0A!s +zP7(H$s$=hRT=;eN_v^k+)V=GHvHIa(VgSC88%uJ60(iyJ%XiO(QVgo%xAL`9gK_YC +zh0CS2X&%=vIVWv@SG->8MuAh=U5gvX%Ib}Oq?24CgrPNigxNO-!=tceU-hcN&hg#i +z)CD4wX_s-$-3$8jK8^3O>7@3SeKf;K;U;1f~7EP#s +zVOO70ag};Ml^&iIASI$VH(j_;?l37bW}O{EH;Do)>#Q9`*ET}Vxdx9@~!-U;vX=I+~h7j|#;$f33q +zTRcjhwo^8G?gVw*0Q44~x7km39dbJ{!acLI{ottknYa$C9(SO(5~Am2pWj|pf4BL2 +z$7aUeOC{~cMmuz7+fV1WoAEBR{6Q+MdOr4UfD{5>Ek12se@=ShtmO%^sj^$t2l6G# +zIc5u{A5d3fd&rk-FkB4@Ni0cjA)9AQV4c#S6W!8rh%^mJVo$f!MslI_xl5$(d})bb +z>zNq*Jt?Y0U_=J)26+seJA49u#Qgm69WFOQTt0@JoQAvJ$#=cEs^{^Zp6>HKZAm>{ +zS9-d7dYX>+^clInf%mrT>1{vX+Z5Y7T-iJLp{M!VskuF;Ymfgoo%PxKUw|w;`F|so +zeh9{?N`bu^p7Ts0PP +zQ;fthimc>M(e2s{WP(>f3W*W})rPfpU`APOG3yK(buG|Qnc_Ef*u<09|QS*nm%?zM#uJno}?t+5!9< +z!nL&;U^PB%~coo>7BbA7}-=%wJ;!n+DjBQEL^mU&iyk|8ETfs#T3~gR$ARFZwe;DA=>R +zPVYFQOb|dJ_Xj~bY!Kqkk(eo-ZtDjVJUm>&ss>RK=R$PDEwN142fNf$Zf8QLiA7(` +z20*K4$3VB7p{p?RyHpX?u#(2Rkma!Rv5H>@IO0e7YfHMe7`mFWEMb~Ue)cJ+c8tf{ +zUw*zQq)2U?xhIPjpHj9VaPYEe4!%cJn7TWu{;1hx@VQLhGfVPlH$U8kRba`V;B*O%?aOs4y^jfCUkZ-ZmQCHJEJPhQz +zCE^&P&ERQmPfJrRY||L&bsznzv@TG!tuX<{Qpa+e9zdrpnNTh8z+U%6SSGe@>tAjO +z>Ri&vh8)ZzWM0p;6Qz +zIan1KZZkTeM{62Lt_lJgpHt3+rpLWAAr0QHh;y{&=WnY*?ns@QOX0Wl@2L)cVB`Aj +z{{^Xx-2v%|RV`~1{_$2;4Pb#a&_XL{Zw#1`HimInz^r&HdjK{9cq@q8kX?GVuMhx7 +z!6W8YotR5fxVa$R54>FU!x;q|fhypwtYX-`R*-|>w)E{_?HE0)4qYoTm7UxCbL8w1 +z!CL9trKZvmJg|bJ0A8GQ-`#y(;#(6%Y_)KIm}#Os&O>&B}PmmZ$|E2wxw 
+zvZoe#=^-_$b-i@&JG}}}U!n4K;ANmAe6`-l-$CWa3*t=gVh(=Lyqli?Q?3fVc7+*!^Jfj0-M5o(*;Cp=z+co-0Tt<@+W_-p9ba5j2xW +zZ5;fyBCCwfcIs&WpF)@gFwfyO+;*X0!bS{7MK}f?SOW$5-+a03PP^konq_&Y!&o@R +z9mN+j%EZyp)&X(HCRtzF>CsWEp@loQT6~V73_+)s4Y}JT$?=Mf-jYOPvn8QGlPT +z*l&z>uGSm4^&+5{WBle^*AnbVQIZ;VEX#c5<_|s=Ep{CjaZTG3`@UV$Zh5IZ*t~c5 +zMXHCkv7B2WZT!+|ZiM%(cZqyG$oErUVt)L3=`?82ygfCvMtiSn;^DZ5H~KzFq>L09 +zbW^#lp!uSes0#Tm>P!kP&fEA^e#87Gqw0`0mA&s$PKPOwmVb1{kk%YPnmh`sp7rvM +z95E-~?x98feB;0ObIa-GSJC^HuJC`)|HiVHZ~WmXn8nNQHg|r1syqGl +zqv)3slhQVP<&K}{o5Ig)kdUq9kW<>ejv$5#5`tm`og4Hw5{2yRiXP9F{*+BuC=E(( +z0AimJDnovc>S;^7uz#KY@u7(*z+6a0Mf#m`=&I5XPlt$ByT}8Cn0W$di4X!EVy>oP +zki-zeWX$S$hK&kS7e|F)1I;|?%Uv-rdPurQ%+o(eCGPLx51H1iLbu@~Z}X|$Wa>^p +zy3nS$%}46%L^(Tzcn!bZu3J<1#iui608*$iAhm +zv{aG@d_Y~CDuHwjDg}Y$ye`drtH%v+Ogywmk+O{6G8|;N5Wamlh&aU1+?4nbmwvR& +z_jp}EEM+677*arwRs!f;Ail&g^v4jxT%6QBDJX4Z9aVu3fOwHk!Hr14 +zkY&7icUER;e0NtS3>_WTluLCC{*RN%Ig7r(8*;!zMjDcRHq;lhz&k?6@ZM3>E%Qn} +z^Nc-`zYON4iNK>%3AD2CYh`?WH1F}UDuboVE5)V4MIhP$EZYmwIvl{nhg=&f__mSL +z&&ivg&zD6eyvoUcihw)~y=YBLJ%Z!+N_ax_#U3Z_V;!DUXn{>bo(f3GcQMKMvUvHJh8kC=U!B?5XLTKtt%_qDJllwo(PrZ;1smWnyQ4OzTm-< +zYqFK&)8Yns-L%Vh_*d36T!A|Zer(EhRHOQAj`a%*;B1OL+!(wbomMs+6rGdjrj6{N +zML%1NXc1$KL7DqnQ|>K4_06CqW&x+TpD`|k1!M>ccnRrJn}ZFtMKMl{YA=DMQ^XHUGC2y~DDCt@L>&1Yn@4|VHs-?IpOXQV&vpdmtURlU{%I3si +zhTuBfA>_9%*p(3r98o?FOo7NZaGbis7R)m)6rUl+OVz8^VIe0kFw;a;;Gy6>PK?6E +z2pjd#gD#ZATY`(00*>Cy=vlh-OAmtBQc{HskJ5#%A<#GEB1@j-SmX(PY~t<1t2As< +zSGk0SFIAS@tAbBe%~2}@)Gy_sqCPtXQ-~05Sd_(DnlY&U1*QH8qWaxZt*mxaZ@1vk +zNPR55X-T;;y6c*87o_i=a3GfVDORS}O?(YF_ +z#29tdMX9Ugyt6J8lay`VK>w|B2hF{mpW9MpT_2@h0hX}uJ697oT>y254=x@@~ZE#r;iVSfosYYHD?u;Pn=Vu +zp0kQ0udA0NeIR=>&ZV|sJn`KbqvRfPcd9fjrlwnNLc(_?)9~ctQF4~+J;ZnE-28j6 +ziEh&pN!ciw)OpYB`@Lc#%(eWpE7z2b3eqj?`O|I +zr7Mu9eUjul=z6j%tM}%H-Vs;V7nQw$vGTgb<*iTOYw5l|TezW}%;y8A>-PNbzm$CR +zzkpPLHPHX$t#}qZP5n1#`S(8P|Mz`R)(0_`q;X=%?SKZD0TnkwSBtr;O?Zk{5QWCI +zCXlqpz>X$K1N)au!t9u2gY4)SOGOL6mZcIqIwsz*Fe^mKyr|;sm+>0;=HN@#a4aWd(P!GK&BdkXt26VG~hl4dLo+pv`h-uSSp2Cko# +zFO44Yy}2BD4Q5g~>tmcS8rUXd`Q~Sq^!vMp-{`B)wH&fAzWWC94gu!Bp2s!nT=8Qz +z?4~r&CdwUclUqAvFITIczwPsv0q+^G67JlNO0<5>I2<-gyVHmY@(fJWYm;S~$ZM9q +z{54{^-Y!;OoC^>J84~N)V**lz!9)*!StZLN-T5k`8}_nYjEy=Ud#YnUOs=I6^)vvj +znRLkkImFVL7!yl>zZxJ{+6oKZkfKH^0&hH4*sj1Jor-OfCk#1D-*kW+#j#em03F+9 +zOi}x43=kL-z}M1OF`O?8x#pFtVMoMqM3}tv;iTeOODfuM7NeL#0V~}o)tBEbmeC)} +z(sLi*vGb{4jOUxgVi51E_5V7p*xV4 +zKmZoNA>dB#0q`D%oAidUD@zgYm`Io7U(y>#vX4 +zRh;1X_J&S?l->}-!)iM3;DH*p(;Je;$U{dNA|(bv7lLt*hY`kNq|VO|&^2$e(MtC4 +z2urcjzJ(#%!}YS>LuB+VF-Wmlz6x%qg)}L>i(W4Vul8gC^o6yqoE(diRy_5V+=2W6 +z#8=)h(9{8_ERg>E6R%x41GI8b`TNQ~Xyu^tv*l-h$7}!WgMjMFKl`8+b>(0cWo1DG +z42XQlj9`(?$@f~Xn}>RoR;Xo2%0D=Ab{d|Ne{j5B^An3LE491Y)dLKuo}d=?x1zk7&V<(J8(8 +zO^4)xlgwk!XHyzlUdupCq{o!r^)=jnbZbVr=f_JBaIa?e)+g2^P@V-fw!M`>vebW$ +z|0V+Y+nU*g!*0*9(F>cB7$BO%&zE>HOJ3t*KxB0uTyuGu-U6g>wPCC`8}`u^j>%FY +zcN_r+>_CVh!+08PNFzce-pKZ4fl{yiYF_cg*5z0pQ8_lC)S%6_Rn`D5?hp0dCE +zWk9Q^B{d+M3I(s_^fBmR^bn2H{_3v+9 +zRpAT!j8tzEZvB$^YmVG=`fZgca-A4$Oo3Ps{v5`@M|w>9v$QF%J;E0oXd;U!FKv2< +zMN|bX%1!)6nZ{%l_YdPHy~bxQQzg)HVgZQ!r=Y*oY0L# +zYO@NnRRj)dpe2f!%ku#ip5f(uwiK{e7+}(Af$`d`AN(K}8f#N&!vpQY%ok!ePRgC(GQ;SU-va@4+JVt1+L|dg +zW-xF&GAf8j4OzxT7;?B6TEJKrCmj`N@5qg#aUnvvTHyFA9?b`?2MHN;JZBUcaa0F6 +zF&U-m8O|g|99f_S(Bi)+2i}-uZk-Js)@GKK#>de(h4TrmD%^S<|FwifvPJCd-+Ie{ +z?ce#8r{8IcMCvwNgsu+V*CF1KlJwO)=_noJ$fHw{N$=ew>$EBFhr;imqMlDh?Q~3H +zNkX=AK<9}uH+B41JQp$;DN+1f6ElbkIcdrKM8}j9;_lI@`If12i!^fza0AHy{g8kB +zWQw~W*|j10C^0qIA@waTrH-HC&JF*f2fe^G+6PRBC=mh?m`r-+cEoeA&)ILdwMtAHeGTnckrP4^-+mtk+lX9GpS;s65ou8^G +z$cPYrlo$2PBPoWM_P8r0oFDH#Qv7W-@yhF4I*T#RSQ; 
+zIIyJta2etsF7rk?yHY37Ff=uolm4tLJ9v_>i4yGY3JfZXOqPUDtrE{@)Au#-)ra^O +z&>=?p;jAWJ(I)>;hHT@%gcr9>>E831Sg-UMi!4pY%wrsQg=)6EKDE#*ymmOK+%k7h +zcL+g0wpNgBIGh?jnAkd){~3X5WJs?FFU!I_xvtC|^!#aBoQgiHCx_1Vj2kM;Pb`hp +zvzD2Qy^$Ty825zPx4^r~nNf-48c?R9#<|JY8WFQ-z +zJEbGMt`i+F9AptHWYG#ey7G*>s8XuH1(%}h^epqH@UO!`FG9hj2ZGd`kZdA%a~R-! +z6nW1Vy>%?y>_F3BqCJ@p-D?m_vJSFFQ!foBtc$;>rV?JLo&VM{-`F8~vt`IGyFAi( +z78%V&ja}ToMB8c|Vqu+#ZlbbuODdg8b|WEF7FUi+3f73^{@NVo +zpaNWzkz8lFwd;a|9SvDM?sS3>T60VUO-;JSpL)0Em= +zTKdghIPyQ(d#|V_8@^9Bl@wYOkgmZ-5j848K-2)zQ9)5qP$Mc`3^fS^NC=S7gCGP1 +z3`Gb<4Imvf6zKvYN)a{KXclZVQRc?y{r1dWd(Ss()_iM@UQV+X2RTXZE5HABoktOCF||{3*Ka +zX>sp!Sj6O?uF?R7k#s>~ZQZua#|H!?}qrJU- +zugDY7vJl3-0LTKFgN5_1!btYVpAe-_!&Pm;wL%x6=(6+bhD#1&u4m2Av5lSE=Os)& +zbZ*i`Cu?^pid{)txhTXTGifgQb63(Nx^lBFrjB=QiWbYDb*q?lpD}Z}L+ef`>vq!Z +z$&42hjJvEs7LKdebJr8i&i_Cb$ovISAPk(=+x_v}_h-FhKYORPJ)Sjv{QAn{k^RJx +zkLSn6&jHqz*D8I_e?Au3^gVIDU=M_rIYjY7=Q)-B{~Px~$NvjC%TX*C3{qQyp`f5u +zkTwYT`vLv|1|e5Lt0)x<0k4^fl+kkT6y&zDFk65TUU}Z5IE8(l-!FbzWSei5 +zZBF_;bYpkkub=hd8`_qzhJ=J$!)R5kG10?+J%LdB#nU_5^_g+7w +zL&Z1R)CwkA*)luM_tuub?ci*&&atVhc;Ca_=k=<$u5z-UXA-yZX#M@^r~H$p=O5Qs +z%?=AL_2e9FsQ&b#)_?ZZRz-vF`iiz`q&Ge*t4T_Y4Zc +z$E)PA6k_Su!IC4!FapTaJDI&j4R0(ZZQQml5%pf{a)j>b@i%N>!D8D6)g?^OmF)rD +z=JuaTZHWrTL!=1kVIXX*B5B{4h}c#DdbG{R5p%2sAtJ)Xs%q_ETeksmovqq6y?#LP +z7zCHA!9-lumr(yE5Z_i-SyALAYI*x%4n;4&;V?QPF|ju~*2XX!qCgU%%f?iW(eACj6e_DDjqX;w +zA7L2nmXaM5s<^OfJoiZcH34mZ-^g3} +zsP|s;A5Wy4+ICJOZ#M6=reGlpk81vyAkQF`J9J}ZlKyL&CjtRtxCx34hIl(?!AYdgrA!H*+ +zN$2nI#B43F>o+05UO!sUTSd>n)k4OfqhEb*IYo%Cag)xuxWpaJ&rg8V$F#f9XunDGD;9}kA#6Kd*4g)FoD2!HN!5b(h)wK&6}Bp7G{5DFr#nZ +zZdGGc6aK`i8VxWID>aq|6&EFi6;S1}T+1URQILv;E(11Q#zpZ+Y<2Ml)kyd9}9mA8?pUVE_X~#aj+3uP_Ega82B_)xX +zlzLDl`W0sZerZpx{Ixk(_~-`)WJoC0zV$Q=2U?A?3tNY3aWck~;Sw)p5RiRuRgll7 +z8G$2f{U4J<38ri;p*c=&{1S>ol-BEY5ihsj0QV!o4(WihYnTL-5-xfhpl)&EwM}tP +zB|2H~Tr>i++5~nmJsmZq4Jb-CI|^z-;(>=~&d)ew^iA5i@(LrypuL1UppjJR>a2Xk +z30oLSCl?(ALj2)J#W)Fak!}Yr49wRi>f_}~^pT(g>rYGVhfS>4#c8%bXAG^kIC|;4 +zD&J8?7lQ^E%l;|ThZ{-EP2)l_I0h~k!HeF_WeN~`nh(Aaz$J!25=3`nNgNiC++N~y +zP1%Oy45*n##5&%HQ2gZoim~{m_W%x-g|#gUz*t_+oL%^bvGlzByvA50?@<~|ph5py +z!#wYKHD6!T)XBa_TT3msFEjyxppZ{J7yisFJY4#8k3K0bf>bUbB@}>;x;GKF!%-;V +zI>%H~xd=L5`%rGzT-2u?5UeI$@&FI2II`zCpM!-SVjIcP)`{)ow19AloO=qcle^_*C4GpeC +zTSlt(Zgh>m-2r;L+Iw`@x6IMTOIi;V`iu`jm8Ql&>~N&E7vM}3 +zXm(Kwi&*HH$XIU6eE|P@jm?6Y?D26)7m#uf>%pbm#7n9$G$5)Pr*hAm{&bFPc4t! 
+zJ65fgc5c?@A@A9M)KC6Xf|utlCAfzzqqK6Tr(*Be3%*&iPR>!E=DPJ1_oBTpcAnEN!mALk)Ea}0i>4m5^|8!0vo%# +zl<%|=#x?)lC;y7t)8PC=WfNg~=O&71$M~k`{sV6UcP&Th8Gf-QN4!fo9MI@==zII& +zC^5rT>dPOb5`^YB&{p(l-RTr*ZgANERGk=PGy&a@ +zg?~lRR69at*b#ohP{uIBj79qc6tzI~Q8F5dV-i?!6EahxGw_K6Syq8Tts#%5`^h7j +zX2anudH6v-6}SZW7mdM??mL7W(IA_3MmQ~yb=eVSbWkCS&UAwvB2$oak&nq}3x{xH +zc7#)B;OU~s&~!focmIncG@C`Tp*su7TDxaZ#{Jdc;zBe=Lc)im9iV$=bpi#rw384GuVSwT5~?%4Si$c%JN(B23Lr3>`7+-~QL|jSg*){+mwtDVqbDDyw-~K#kz_ke +z$#L_|t%>)y2uySg@#7#Y+2OzlNMrR(0J=6w5}EkeEp#71s<8hET*CG)MNt=$nex{M +zI$~lSu7BgEG<1+QFZphmOxm3huw#U#>cROl!h%~|?^$H_P0{rf7;E@t;K&+&0Y9UhB-^G;m +zf7y%uU-klZBcrBa*HON2$>~MH6u1V`TLU0=>2DAjJ$QIN$~Sd7>A3~#&*BYuCpUgF +zA*n7?(&IYSft!!_{T~U;(c*+RdP(FudgT(SDkj{?!yk#c(e4JT@Sq-byZLYxh&j1a +zC8*JlHNr}!lUg*%LsM~^5dVQ-L|O6jNi4+8s|sY%nT#s(O&x{gc7)_>{RC)N;@dO4 +zfHU+-k9euBq(h^eyOx1pih0-~2(XFi@eo@qPPt173CRfkq!}i!#H%sqU3JX0bPNCE +z$hobM7J*F~AC2Z~lHD^xM?L)gI1-CRU>%g-w>A8daqG;DY0E&ZvmvWfRO-Nuy%~8j +z1{BRHs%<9Ml9H~s#I;xSKTW0Rk3}Eo_G9a($7=fS<75%Q>3*8@qKrIY23Jd^W%qkZ_@dG6mFfO#Iw5CMHaKNE&w@jdwUUOn`bU4lOBf-$jZF_UeOP@jUpr` +zgWZasX@YoTv`SC^s*;o(tH9=xsJ!5GKWt`e?ai`o{|9I2rQJCXgYzotc|}uYJLFUM +z;G7OI5@$5%h)nXtnXH_2*r7}YNh4=^CaGh&NcwD&WoE#vX7a=3f-jDYv2NBrPk&jD +zWTWypPPH%3oeEjX+ofkbo2+2$Jevj%y9xKJjD+jteW^cL~s%B05Q+xZc +zauFt&AD&spOqV#L?B}HHe`q$oG2{BvdOtcg)z?IJt9!(0r`u_IH4w)dXT5ux8RbtE +zD%<%0NP+B(iCFHs9euXM=K!VEs!-M``s~?z>x-o6gv6U&VV~V#vz^r6XG27+lp6OKe<{zfdtagJt;S2&(z;6|F1ZuCZ_%U#Dm}NyyA5>FH7Y%2Wj#KHUFF)B2!x)1 +zc8OfW-aYdY4#eJ4y13i`I4a0JMpy2=sn|0C)SgFFEW999eeuV$$4dZU(Z29Th4|iv +zI2nIo+PiOKb>GKleIF$H=eG5C68aHo#BcHav!;DNP1j~FTTZ*YJvAT}I)Ka`KsOCY +zyd04FJ%CYtvi{%`*>g`egg#NoexlU$MCIj^jlZ8@RR=d89Nc)! +z_GU>osH;ok7Hv*hDw1O&Hn@v!sbWL4HQBAg^@GCvaT1I!O+><8*v4_v5jmV*2xz;F +z{W{~_>!-3>0I!Q-{D33kpbhGhZF4K6=Y$X`z2CV4aE#!xa06#*RPQ4W<6T%46MVZU +zWob@t^$VK#>N#U@4L!meW^}}*-(VZ|$pX+3-C_(yAumRhuOo5@iW_lvsn7jo*R>sd +z1i>7EjP#Z-O&ghr;L4jY)?k?s@4X$*_`|zeR5I<2S_P3rkm-jigmetT7Dzb@WW9Z3 +zT3QgV=>{Q})x0%gR*r>yFNzcNZSzEWb)ZHx5)aNe?M}8ZXfByBLk%ZAq(XJ+MwS9K +z`~dw|)s_jkoG;7?D!b5nVIC`rR5z|u$~vCwCd5+gj= +z`IU*8A9JN}`-`Y^HgCI*&|y*v0}fj$xyZ{J9VvJ6*Vpj5_hiA*M+v+n<1Mdt3OY;W +z0D3e_xZE8*!PrbBs65=;ykT6#m{+T~UPD3%%0Le^t-f80CcM4}03ig=?mCB(B}KtW +z5i{OykRoi}tcHt+-B)wmYP)H@jix#74OFIOxn*N5vIp2wt&4!2){7uA@yjGHY3?KW +z=xx#)r-YdrD(wb_zL2x)XQSF0KeUX~8MJjfKx%^KkBxW)k_3W&-|-wDj9q24o%>F_ +zR0|W6Wt4$zPo^*CzimDt0^#hzVqJLzDLyBH>hi^RnT$dmlwcv|1b7j;}m +zn35Tih4^60M&JpMWbUI4hp^qQ&7ut`k;xm|*!}CY%fPc3hV@HfG?ploiXAZ9nG1(X +z0D#3`gMp;X)H3h1iXYYh*_m+ID(qb4X$AQ@T@F);BobqvRDeCN7px~>OEd9|EdW)) +zV<4anJCKqJc2Q@zkHvObFo>1i3>X~@bz(y`_7WJ4+uwoWi4bX(brEMjf+Z69Mp9)$ +zWV^w<+Vqr|ltMGoSV)8-R?7ajRxbY;fzh+jAtDr~1W;X^sB!A(z2M=v^OtKEn&uMj +z(O&kvS{|!u`u_L}?Mu&vd4Rxt0SJsD2_^b=z{BA8mz96^! 
+zItWn?A|Un(@}o}vlO#G4fR%8?MuAWFzS+>Q_()r03?m=iSFhA-kt8EPDe!gF#w-c(U9Wf+G(?J*JMx1(K +z@N_S?P2AAkw=+8tB|EVsDnR7S$^&j_0gKIMw*7FG5r +zM)o9z-Xh;O7kywH#lV|3s~`Ic93f38^w)x}V|`;ooJB>*NEw}Deo@CKM4MKR*R{dz +z{1TWTagF139nyUe%>D6+We`I~Y`{0;_=;?od-LP{cNsaq8bw6|!hwB#U-M6@C~dwF +z+zM!WxxZRp$8QA7zf%R3Z(qYM&02@{po|p8rw_~54=HYx&{t6X1g@z)^t^Abur*Yq +zMvizqy^EgmTJ_5xg7DSwAHUMo><=^td|U2UGDCd}D#n5GM0pb#5VU;TX(9F=92N@} +zWPMqa*1Xz9GE2!5gIV0Mvmr7z8iFwy7+AkvIH^iK?A!es!w;6y3pb?0KAoCudV2gK +zOnW74diUW2@1=7{^^=AQ>s~iDt8JpQJeNV9jpkA;LI-z=I)H?2;fd|-w)gf#*) +z!&)OSLg{@V1{Tsx89TW;KYVyuv*KaMH2M4-Po8$)fhHpRW9gvoVM?z&6_Xx*$jvud +z9rRp1v`ZKs(-GPuPl1p4<)+g&=|R>Yj404#ZFC2=N9Rej_{cbckLvruvmxX +z1HtZ0cXi)u7Ua`NvJ9Y%T@`>ovLxg{Ar@2NRvnSMYiUqW6v>_4>ISppP;%8-J(J8p +zuJ1R!$etpW7b~Pk9kGrb&8mqhV*4Jzvkr49uIg-v9vFxcCCL2^Kyy&bqF6K^oSshE +zDT-O1q*hIlcALjub@b=4;2SAq9r*-lz385fhyYeR%Z))Bj&HOeWiN#CWV_;M-ul=h79wQ4|YQG@Zplv(mzI!2M7_ssnUf-)P_1*B%i6Scik@!1MLIl6*+NI|!5jsQHijZZBQ`~B +z;d($vG;2~?Cx+HA1)NWX=}t!;%b*N&+`u@7W^kjkEGT>QS^64*`b8;wyJ*LYNd}V% +z=uYvl0Vwag3=k;V$%FktB*Xy2n9&r2T7MtKRQKXoG&{x1A?7|g#%(HXGXi3z8Cc^O +zdnqPul%K%W49v8oUYU|Cw*@-`h-Lu2g~!e0CmKv~{gFul>KQu}t`||+MWi&7;UM27 +zau+}$rqXU|#1e}W3X#EAEJNC;{ObHNCI`)ChH8QkjycOLQZhL^ +zO(XWfbWXB7kHrU@T9TobA@LbEH!k_Un4*14&uJo~Q|khhVna=f*AUGsRF3%BOfh_R +z>0*u#DlSI;2CF8|L@%{d^A>J84NgrsfXZ&H3p!MiFsqq*U@R9F3?={qoPld&$XLnX +zyd+eCo_fI;Kbkh2_V7%obB4tEx}ZS)s61|(VMgY}BrMb-BQ?x9o^wHnN3o%JJ*g}8X8ZZYt8sk`li&L$ZI_yrdR)(2E5 +z6b5&YUJbwns5J@Dd-W`)L7{&=?Uq46?pVnEsgfL}xE%eGZ2m1tEzqZo{Fox?dztaI +z_33K$q$wOXYFb9C3%-Sz<27CK14X{VE{!WL%^2mk%9rZNvvGWKZVatIBkz|b1z$p6 +zJ>y5^fhi^QBQv?ZF?Ym)`343`{4TRJg6uB|pFZoSguBC9ybJ7s6c?!h=D9Z&8L(1% +zRW~`WqqKhH*4QF{>u}z(dE%TEN9+X&P%v$vUpL$ne`4;MvSs-Z`P+H5h2pWGn3}s9 +zJ@Gfr`p(xS?)l|Ex!P6m!=W%nz8G9u)LB$N)uky+_pIrYpoS{bSz{%mY*}4E1XO_H4D61<<~LsMiWtev51qu +z{I7@?*Ue<@j|({DUigPqo~I9Y=&6d3zw4e+pjlt6H(VU%e&=<4bVyucp?fVFf&3Y3 +zdE_s*-eOxbMXA}USIQd6N>Hq$kSk9hs+RSN11xDT-SW4kr?W%Y1DHFA17$PQwbX91 +ze$ma_j&~&hXk&Z7+*0;aQOU1Za(6~KHaOjDp{mEN!M22dNIC4SR+hk;If>5xUE5!nuCEVwDoMAWqK9Vs~$@(5RZ9?EvntAUVjWI +zv;{YP$Y?UEZ5kVCQY6QZcsxAemWcIy&`IVrpu(0#lC3wxW)~h@G)I4SC5;Z3I^E0@ +zv+>Kw6gFkTKnG}X^#L%I+`|@iDwN#Uj>*gyA&?z`TWD-0$EX!$@?Zmrj)RucgO#0S +z7l1UhsBFue)H$5+us7*R7y02B#i%3FE-P^%;}KsuY_mS=Se$RktiO9~wLw|+wlZIJ +z?JTnq_$wTgR0elOR9M)#TY007W5h{iK2Nv%+&Jx1p$ndB@5tN^ePrfCxA90{P4k(F +z_83j;Opb=UZ11FL`=p9>B}e-V&0mc!L$k9w)3-ybPIvW1cO`A_(gGYUWuUk7T`z)s +zN_x9Ke&~Kb51B~w_}7dEdkx03IkCSAeKYQI^>p7pGxR6J +z$EQB@uB~Y1+!(s6p8OaNBpDxJX8Bn}) +zUis(+)r1Sms)Li;p2!~^lmoun0V^8OMeO{!e-W5F|JMji(=hGjaM^uc6q?HA+1cov`mvmKxA51ENuiM`!=)7HQQXJ;yx7>T*Nn`l;poQG#|h9;5vHu!nWxUyR|(#{XdE+Y~>iE +zHg{_O7}yghYK9W|d-j$iyqnwmTQNI-4w=0WvE?4c+O@0EyBQl-AA;7;Xu_iHUF-Gpe$b^_ewz?`HgZeBR=BX2UfACn>{6r^hW_!p96o|jFCAgaXc{A6bQX;I8Hv6| +zB+$F9cs$Bc%25_MF51ejy?WWm@q&BH_EjvYX7w@PWks?CBMh)=K?2=^U<~oY=v{nt +zal_#O;=GkDO2VB9ELECOi#XQiuzq(vBttbGE+ygoM)LNr^FxLt=0W5|a7RS*&R}1t +zv<~q~CsU~M+>X8-*{sq9>DZu>%RH(>cbRpkZ&kPKvO4Q`eO7~k;LL&aV9ip~oOHuC +zbTF>ASys2US^9KW_v!j?Hx7$Bq=Eb~!r5hmArXdB*p3N;VG#;^BN@-~1Q(_6+)kSh +zy263TH8#Q>P~45GD02;+$QEiKX#+!lw67Rz3!)FuTs%} +zrz1wpl7CG{41jOyWpB{=IJ)zs)o}e-)mpofQ(Ff;n<_qyKhv=x0PM?njJnM%7dsP*GwvmbH@LeRQ`w%U>XFIf;CpM#WDWb(D($} +z0eNQ)fMFo-Q97*8e?4XlkK^498g4i*TVMo#pKy(O1)=AhKlsDE0@5F~65{p8}?PK#p|*VE(QoxD!~ +z*(O$`7TLTv>}>9|>GMkinmvZLOFhJ6Et-O|_j@-j$wEmGq_EdjgGpb7U(&72N{Elz +zU$fM+3<5(|j9m4vEy^@4ETXSS5@QX1S(UnS&81CY$V{t8(wWe0Oq4Y5c_patidxtaqG~$n6ju5_@%4 +zgMagN5zbgv;rz5(kJ7}kw4j_jhCWC96~`f%eJ#Ji34o4woV2=8KU=fU{owaAU1q>Q +zs9*ppE_|UOi#L*9!9lg;>)(d8>(-hk`ljTsOgukdcmHt04`LGa&Eot|&V|IU>TU-V +zp0zLLv7~bFqKa=!_^NZOVeb0z`=b8r&*S3CUndi*zC``L=ApOc 
+zn^rpW8}(gvXjp!PXQAGwPlaxj= +zi2tw$7VFbm8msfm#cJqhgdq +zhsZsPSNaP5o@}R|L3ro_bV3w#NKLwAl4&OvX;~CuK1H(ZbQ@H^3gt%qob(!!r(Dpu +zdSZ%FZXRViNgj-j!nncAGy;z*ur5_c_O|;ruj)dE)sF&Zm;)@S6YbH*(p{bKS8R&d +zU<=l-Xi^u6CCg=Nb+8{4N>5C(u<5Y-la$kxNbR|Z#7SvpN8~ZCH!x?ZPD7O9y(l2b +zgQPX7iX0a7O62-Jo%&n->I}N*i+nqXOu+WPPb7{pmz~DZ +zT43=OV@D_v=%uS-MI6OdZrFVa1)yZ$=>UZ;cMbQe8bmRBknA1-dN1+1g1YZb{1qzO +z*GS*}lSR-=cQ5Mc2zkqtQ{3?C?Lk^@u;s-Yj-0rcX30E02ioOrqUb%q5d#1P!53VM +zL-lv29M8BGWyW+uk;Tdy6wkf0n65<^?*z&I +z;G!v_n2!$0ho`BL9$r_Ay|$@GCTMUW`WaeoAz70#b|4=+Dn&bn!=DbojdIW#t}S$N +z>=F=O0)Lr`qFR!xEQ4}KJ)$$bFX%Du7pIjdvPEX>FXT8j|N1e#oADp6zfljNp?ovb +zQGmnd+bTZhAV`v88z?HwF2iTpe&AFglOGi3VAK)49$r0D*>g)bNJ!?PV$MmbTYiSO +zLvi@}Gp_H&Ai!d%6)Ef93EzUfIeq2^1nsxz$i6gsef`+gb7o=iZgSsbcve@=@`qc_ +zG2tH-eN^kOxYW6>&6Mcohs&7>b3Dt1TM64!#;0ynpU$} +zIjDKH$2O}gDC@=s-pgq21W3*WuNhyzIsQqdVg+vDPQ4-wLNa88t%&yM67ZSN`S02@X(ep4fs9V}?dx

{g$NzRi(tQ37t2q +zeuXZ|dovew&Hz#qOjfcjyb*NkVCJprpn_n00DdN|ywkgKDsyv5*=BZjoH{6f##ebJ +z0Z+}9V}-0}f|fzZ2!oulB7a@Hn=Uo-B)0HWJxQb#oP+lL7VOrl6p%StnmKmO77>fK +z4(dg_-cSlaSo4nX?`7AgU>1`0@p$UkQWLdn0Gwct-5K7%xHFpCxmr(|3(lr#>%h)O6?`JM=p4FGoT?7s1;%r^+i(lM5S6^4^@@s!X(~Ekca^vt% +zh|2SZ*S8_!(TyWjNX+TRsldigWtY;@i$8ug)QUG$^fbO+HHGzyH@)}?$-sh+&ms1k +zfpTGTZZ}S!5{Uz)pM1gD#zzkK0>$d~JGztL^C1sQ>0-+b4}%_^S?zHWV$Z_79S9`h +zf;Af2DD;2!5H#)p(QXd7B9v=%AYhsm49`kAJM|Jl8?~Ed8=DhOwTRQC9zFw~!Z|-W +zCAU551W@@of8(U{qm$Nwt$H_F4?b!=__5U#-lqKsAG&%mFnD;M(g5c +zb!9eoWsP^`taJhSB49dG;M`qE>*i;5mo#>ljd$N&=?11V6^1?coqMWjJ+)aq^^HA^ +z<2?^ndW0&yErz{q&b{rl-p;Jv?#5oA0=;H}QF%OQ*!}Mr$-ieL|JjVhMQ;r_U_esu +zRXAQY;<&u_ot1R3nWVxyjwuWz(KCoK#J@2E>#oc(B@0PHGs#o9T!h4T3#aw)O)#}) +zNz9sEb~aT6hG~wA=rH@<^>;T6lWLdUNoOd}66Sn6@yz%mP=8>DqGs!U +z9i*Tzhe2%rYzz8s;uR@8j?y>Ju0&dt?fW8aJ4G^dAt?^Kg8M!dazJA1@IHMT40LeIvue_n#|+i&C%%mEIG32Sf(5BA +z+_n`Fdt=;bN +ze*+D@+5Iq4Szr4qurG0Z{bzO?NdWKFnG+fMksk50S)06GCWD62+cQK(`>HjU%Nv +z*ao5`jo}4k^PRHg +z_o?L{DhzVj0_1ev$hR3K$HhfP)J0f`-9RJ)zGV>K<(-?njt)Vh;w$yy^x +zpW;3H%$*EYKPSEx;4qIu+<+dpa{C#}>z=)*D +z#q<$~>9w;MnOc)|r!mGDAt`2?Y%azhIfXD)hxn|ID3-`z;WaL@urLOCy<>!ABd&t( +zDRa4XY6hxu5wV{BQ|&E?wQ)!gb>@gMk+^T@Nev>Wp +z%;U4r?PU|Zs{fZPKYEQ5=nz1b@(0HIFK7a1o=45E8 +zzqA&D`C;P@l>qFccfZTg?c5LE)PE8HbcZaSxZ`^i2fZG&WbgR>0k|>h=N|hmXVyHe +z2S{?SAN!$7vSUd79iq_B{cDI4KF0(d0wd)#M8j)h6@cfsDE}JiJXT$9hm*Xpy8Jh$ +zye7NV0Hm}zpz;_j)FZ@cECt)5!_NNX=cNqaJvCo5e1~qyCysyErH8f^AmRL*FGZ(? +zg10-r=-`!K4@%98cRv2&buDB)Xd$S!>KuugPe-C0@aycKkXR3euTTzP?0*kB%VwNN +z0*#*)2V%-#gT^OtaP7)_5@5!4J@!5G#mWaAK$a5m^;^!bmC4<=S~M>we$RXB`~eB7 +z9}KxGjll{x9nq^Fayt~Tfli!uL;$0Ubw7)hL#Hf-^&{VTjJqLCQ$+*xkw>o}mA8I> +z09Gs5=XV1z0^-!Q)%wE%Qegcr=ULnSU&BSBg&d&he2dG_ss()mv5)@sv7B!9DpPO% +z6j2n6rJP^Pd7=6}BVGUXf!E)QnCIt;u!hqYK1YJH4t*&?{+hXPmvQ;>pDCrz){p02 +z&A(_p_c>JkATo~_IehE4tQp&Q?tnDqZAj?UPRFoGrW)&HJP}p^w1(WR|F(vXgCbY; +zew6>{`St1pVD*&=TBS}N51s0M-Gp@x|C4zu58fb8m#IA;Uea-B6@u~qZ60_aJ-kVe +zzE>SNI7x10k$;oX1DNoF;Q*(}uoDPIgHD0_b!eB!b7~fhQgaPaM%&{L^^7~PpyiersZHDbwbAm8Jn~1 +z^*}&k_E=H0se539yu>&?#=eMMpvNM)vl>_|?J3yR+MrN2y+{z#!U8=~k2^UR>%wA7 +z^JC+tV)pBSflHxRM4_%_~0z};97p1V>;p)g5_BgGGq~JOHL@|CrsC{ +zDm&s$YsrI-<0CAmeKg;qUnY`!(WbYJ>S~ +zdRdq_YoCB^cvF(4*lJt0?*r$K-WONr#88A4SGirYB3g#}<%NOc#?&Yf?=0 +zt^mibLGdPlOM*8tWq%z1`aPhf6W!8}*ri9lWNpVo5WVBIv0>ks6~cWXoZEz`0( +zV%(PKe+Y5C9cdLBsiiD#f?|4Pl?DD3=v5lMkOG*ak(SOmc=Qr +zMp9lP(~>MR98n==q~u<=xB^GoeNEQNaN=S25GpdU#UTR-{89#x9AJDx7r!-^GC7r? 
+ztRGr!nO2Kp)vwmEy9UyNJHup0x$mYTDwgOSmYlB9I8Z0|H=h+fO`X;cRK@?TI|EF| +zHs+9dswpxvOfM~8Kf`r8A)U&4xD=E~y|sHdY?o&4u3;L9AMD5G)gUBeN2xzh$$gf= +zOVpr$rJVt=Gv{}7GUK;9FF-x>=jhEAhde%ly%y3t{4xcRIyH;rzVZDI-2HP +zd*kbw*6*%NIiB=65QO2$neZr#>!z{lqqEKw +zMa~r4(G$-tMRH~moM*@+VV1N-W;P;QZnXF#s^o!s_7F-obQ+Pt;pk)_j(BDsWoLv9 +z2YfmcsZ`IqTAUx@UYbyRqq68Wzos-`@;1ivR_Sm68Nr%eihmUxLLtIRP_ieB;kO1# +zi7~|z8f7z@SsC@&Bz8eq$GzX3clzDSUN7eqOv0?jlHSz^eF&xzgY$!z!-_=rB#?sN +zde_+)sBlO|;|#b_lrUw$kR6NMXAx{R6Vinh1hDc2!*@%`cg!=xhgPRbYo>Wu++i|K +zl%oqJ=Q1i*m5Sw-kaD0KM!reebcCxxUIw}nv&^5cx_>JsM=pfXY+0~FIe5XXQbnt% +zbeT@cEGGQAQ$9w0lL?HM#E;{|OD!XSz`Km_zTp|ZU2xSNO0@*H8nY~LL{u4<)P&Vk +z<(d~eE>!Uy(qpu8f^HTiYSnrnAhZygB?t)^nto2nTnE?|u575A-lmwa%aOA&rjQaB +zB+{Z?)k;}%D;6NiqRa(Z!I55>#Yj@Ud3x|i^ZIir1fYy+bNnl^!a*)&=HF@vdLAoC +z$hf02onSLlQhOkZ6BoGKs%nox3JKW&jiXht%Pm(eDj(krZwv|Bs3igDwVzqaZX1Da +zi3#4@LsPn$2x8aX^GYw(W8a)EPMHln$ZvX_5l;U_GtmnFGM2QfA?PxzQmz=hk0xR{{w;8rsk(=t39x=~Uv{E10&9+53Jg|;^ +zXl>atuU9R>&oJ`H^^st0EDOMD2M)#{wyg5dX9p;D#j;gFUTIkPV21Z@9BN^M@4T)r +z%)z(A?AoWouKwp&e|_lcZ1>jUbgff|9ublX4%TFVDN~NpvICyA|}jt3P)2 +zn!&hbJvR-n-D~ug0rj>tcFmpcePnhmh0~K~)8jGF1D9_Diq2Sb&@@JFtq$}T^`y>U +z>F0RnM!UKT-EQl;-g51m)OMd~>~j%=eWvvlVEVt=^f7|^rsDgT#@&9ZcnU@ROE� +zy;sN>w^a#Ocg#R;TA%puD+Pjnge341+7%=AV9v8#*X!g25mOEICc0>xzKo5&yv=oR +zNA#d&;o!dZL8Ty}=Ds|6eKX|MZaO|9D1H7&^w!9xG`YD|}}3bjjO>MI?1WEqW)6P_Ns+Go_k{9OxEB +z=NBhHKqazSup2=NuKo^b>>>vd&AU@ +zfIfhn*UQ*b%KMs8eWYvsz8sVNi2A?;%+_GUkfess`^oute_{XnK5FiT3WYc=zeK4q)X)0&o&39c< +zv$Ae2*bEaCaI)6eCQH;c62I?{4|JFI#Y$C!3&%J@DGDKQGTTICWC-_eWgaM$`O&;F +zYW(r?XLQ3KsGOm;?XSb%?A5y48`XiUp%tCxwz%n0*aX<0F8Yk+&?g`5M5Oc6#T51814npbCT!(3=RfZ=`PzCz(AeydP;Lb +zNel)oTY9<$=GpIs7t$qVYaW&C7p*tu5ypo&bc$hlJW9D;Wg{qhHy?+*K~hsWuwkDa +z?BYh>Um!WZnvV24t?(+g$%HnRf!I)SX_kLvy|_9|75^>y`=HBiLxq~1+7s8g+7qkB +zzI7^WNGjKv1*_(@w*bdY*RBzE|Q%PP5o)}5ZsVC8^P +z@Mccow-16yr_{b&C_oT?2%?m>^44F%JDD{j7_zpxlGvk2$>1n(?1tv^XKc=+#4#{Z +zTY#EkCq&w21|+Y;gNelvA%3{NTf5p|I&c$HQxU@veKifR79Yr-;rPMm2qQcL+CD(n +zcif*SiNCZtv;7%M1tb=PgeZ&?nR+_f?ybBMD2~poK31S|VZ_)`bpXM6Z-33ypMc>` +zJH)DBA->sO>lOu4(vHu$dH{)hGk353bfoH;P9w)aTM%Fu0jHB-(#VN3|Bb!(3Tt}r +z`!$mgNTCHp2t^I3G#f4pp=szvRB)+)=n@-E6ht5iNgxdpLJK|gpeUh;p$MTVC{h$K +zh|&ZXSRjA}D*;sIkL!8f*?X>Q`plkv&3+Ga#z79k{ri0HJ5fBb{>nt#ZR!P>%}Uqx +zJ36H-v%TWPwIJ#((SIYz(*u-;c))_}_qCBR<~0C$Ok|Q;sY6Q`IUMW;h~9ARlFa068(braQEd;vB5g8b>?^vEdQ!X4%a!at31+fzc)0#R48N4 +zrLAkt;5boGjQ@ZXdPeH-VO5KS4dL}JQS9D+l1qwt%YV}h81jUncfA` +z9mx6q$1Y^=40fZ|6MU<^thw87$;&~?Ka!UpZORZS6myhM{cE{#2mm+G*n{s@A;Zfd={766_C1cfb8wr~;(BYf;S;cMF&b&9~++ +z9%hV<9{(>OPpe}F0C}GM*N|ri%v$Zx6=d}Ak16bvhQ3ROz8F$LQ#j?N$E(U*1^$%9 +z;ibNS0%(zim4qk;JXARYdW2M08Llq=F?Z~?nR?Wr?|cRb +z>@aUWnzc{1uAU;XS7we)7kv41_sjG4_C}r +zdTIBTCt0U|bRCXTJ@kZJZn1xH8#a10ae~=LikkZDVg7+~mt_6*?oUfRavPUS#K7Lc +zsGV74$V4P)m~_^ZWaps(obUihXVCHANXL%|4+&EoCcQBwO=-|e_+Vrg6>b=TnV?i? 
+zMC)Tgbf0Lv!PV@cD61iQ1q?z5(cy+vsA0H1FRHDCj+Ii|Iw1}d%Kt(-2CGQt#Wel= +z5M!|Pd(Aczr3pZ72qA +z2t#6H;r_9gdBioI|B6ck&5=Fq7`%WQxx!|U;n9K8$RI-OrL0&lQP?kWtOZ;4ieqev +z39H6~l`RS*U`ffMSRcc<8^icN(B!?6@YO3LA=uU${14B$HMw9qndApu +z<0*6c>zZ#unYCitc@HWq?kYkMwJsoB2A7O9ixXrqzWJwK!y`RG@$mTsES6s3K|c>q +zt^h8nR@lr%K`OH(;%{k~B|HslkOEL0chQ>9RJK-WxVuKGrAFLYN&1QbytgZ@UW2oL +zCJZCwUL+uYn358t3~ww+CxC7UPu=MmeOy93=^W=UpYao&>1+zAC3pk+U2V3~r7TW5 +zHlao%`jFREI5)!vPdpw#_S7UxVVn@>$j~^(IkPk}G1aeugmsRiiqdE^8A2(~;=cwv +z=M9q0vJ;zFaSKko%L}w4MpU6@So|X6+HkUiY3_5U$TXwKsP4!PliZggQh_MD!8vlk +zDSUOeSOyK!DUR9bQ{xZ!?S>#Txz&YIrg8eiDKAc-nl0)gvi7w55 +zRUg+YO%``Sp8B(n`9TRL|Ast#_6fiIYo@8f;Y@>>j5~;{(};rZ>4dHZzDa!^R!~6g +zEHE&FA-W^Sy0~2;(hjrSKT7kYasLJ6;fF`D8QCMr^T~4)D@4+^CFHtHg~5>ScT6IuF;+bik2l9Jbr=3xTyvvTS%3%&5Ha@; +z=kGDh)$bDcFXdW!WnalIMa-7{Z2%HRlCUtHrH7rK*GiO?$990rR +zFin}ZH)}M!z|Uqv6}$_9a@<9iSh+@q@{7t=e_=aY=NQ5({>w=a=2|H>v^z +zH5Mzhq{CDtQ~*7uG36a6vfJb)bts&sS>3o;bIqU@?@??Om%ll?xGWOCEuLD}n6k5+ +za|TPYTaHvUl2tQO6M2HuOz!N|WY!p!?{LbyH%pY6soK><*IwfGN0J6cqv;&pu>%pW +z8uR@RM0&XpmYtKP;A$*3yiAx)YzilBi5d+oh00t4do56$R=TD +zcG2V953@-Zhegx{k-#)X97oeKOfg@oGjU11jBKWuRtN}n7ia2I8>@ecs!pv06hh)^ +ztrl;2ch=V9ix(vg?}wO&u_9aADzRBZg;iZ7XL{#%hupgjQ@pgql{TindK`H$Ak!K&&*|WCB~(H^dzX))L5qG>=r9-I)N8wA?pMmgXf3T$#tqV8`L=N-*ba2qLq +zENOW~tbaj7T9u3REl7_J#3^RqKc8E9tRlQkC<+*=FIcD_H%~Pp=TTqe>GsGb`qen@ +zym-eKaYUTAkST`*1*4`fE`5hQI(|id`ZBOOY0;OtpBX}R4cf7Sxx%yzfw8*c5+JYl +zb|t>+%|?rzWQE^6+a&b)l%ltglP)zNe2m(E6?deUBn>$n~T3`}G|9_sE@BA3uXWbxu3+ +z92yX|9PD@QkURMOy!&^b|D~;k|KIbi(0habgf{;PZ34HD`5o#u0u`3U_4S(%Yopy( +zCS#S%MJfonD>~v?w|7DPOd~P@6NI>co#;}Zc!x|!m^}++z!Y(M+cwP+=35c;XNl>qm1%tjPf-6|7F$+N3czn}_NH7eC4bp|7z!w;R!| +z!Ry&>pL@9Jt&WYZpw?=a@v-}DI!;{IT`h_|?jW=^wfc+m=Xq|cHbmK~rfu^k@&O&i +z9ovprx$ge-K<5Z+-Ms1^xNg#*?m9YH?Q^VESj+lhoU9y}|2}1Jy#xdZUS6!claq@m +zK3DGmf?h$Wx_?>s-DIxfPuv>IRuidx&)}>3=A12uz9}PoFtLcVKh+#j_V4l=rmo_I +zcBnw1mbo2vtlADyGNcsOVy{C3Z@z7DwC|oG&Zp`9=HmJKwe*DoF2d%WdxyD=@)Nww +z+)>s0>g%>dyJ_#SG^zgjAbtWojjpJ$vbJqKU?H8zqC#O!g*(JZv`EEd`JJiT{lDMl +zMT3-s2r>`nVRz5;=*k`y(k*L;Vo`)0kY&azNZT|p|$Kg~)we;VOxT~E>9Bohdb +ztV~Tebmjy&Nl3R|dn9?s*ej^sRb6H6R+w?n8J#t!oS+?EadJOY$=}mkx9vq8iS2rT +zT*)!m$};Hieph}&u?Q-scP3fYKRwtP7#W2uaYNtm<;-!AU9`p|wRgJ7b`-empbs1B +zQ&aRsb(8Kc)N{5S6?X4F`gr%9q|H9K)Kf8CkasAkY4JqHxoy@eAsNRxCihWCK|_bT +zkKI9e`gn)uJ7J%>F*Xm^od0HH10jSwZavfvJu9+@L`{G-uGK*2)AG)3gD~uC8niRab+a4vB5${>n?72D(Nq8_ +zV3!x4T)GP}`RppO8CX=nfoiXflVyF;OZ|92BBS0S*;4UJliT-Iv%q@B<%!uq^2=Sj +z%oQbDK;UOwpu4p7U?5gr>v@qJq};n#a*PWZjLF=c_i50z9uqZ +zgzNGUc%zuLFAU|=wxML6^)##R>rIPOGbLDAdG?`iW%A<)n$yxi{oQYXL}u2=0EWgCa`XECUDP*>$bW&Om*<3NH5 +zewt81TKDPrVaRLkCt?1@;~(ul(oG4!5ZihzL8V83H?4hkYk#(Zfg#*OiB8Y0%2dd8LENcVcS5XQ=uhPkt3`S6G`Qzk?jIWWX36>#bd +z*w@j~%^u`HHtUxUhwB&SVzR37-MU##jS5I(;8kK$wE?w$m^8*>0aMf#7U!r$nPLhY +zwzEoviQ$HDyp&87P@Xu&^b<$}QgY^W^uwXVHxlIvETtTeB$%vvW4tB5CpxWB^;xh` +zH(In4L(?<!Dwlw)LIXljM?b!)${mqyno5X2hi +zq-R9xR;L&&l4#zT5>=YCt}Lz%5qr!m9gZZryCmgG>As6$mcNmXdKtrRHr^9g=xogT +zlS6>H;Ha(v42y6}Yp7;Njxqj1aF>ypxfaK}_; +z8K@c!x#AGnRVu1gp<1eM4kcQ5p5{Y`K+F!4yI*36WiiE<9@< +zUL&n&K2b652mPp-P*Fy0;Slb*aE85DmyB3xy?!s^}Gs>SvyOXF~a>tnv@_<;9|s=%JEY +zSk2yH!p4aVAQ=3Mtu$r?>Ly;l-k9P)ObQH0h2v`0Z{e3{rf%%6Q60e_eq4;xj-EEJ +z%|CIc9+nYPpSo=^b$?32V|bWjPRT7!yqkdXo85zcVS}pJiY_LRtjKs;fSOI2wwL@spDMyt+>YEI9BX|%8F%b +zWm5#QywZG2Q+j9Wo{$24HXR7vKWU=%uV_cyD5pN?tY&)W_0E+q&6WhplRUihWU+Pg +zjm>YcxBKI+A*EF+xeCYQ7>t~_jpExU{G%i=dBBWZ9&npFdauNy)cA4p*762rXRx^qXfIkX4&8`c-~(IDjh=M-osoNdKmf_1WM>wTA}Hk*S4L|N +za4Ln%cXn+hYn;SKlvA~vBkT@vmX`5Bz +zpo#C^BjxPkl*X8~`sWwtNhqwZbvYB% +z1z;Rp0do3^u`pCuF=8*q#LCrt@456H*cSB?9FXO1y|r@Ck&0gYRL|g;Z<2c7 
+zD_6f~C;KkP_Ra+PRU%R*e;8ed=N56yqQxk`$n=d_GbMUES +zhumLszGvmmt}1rR{(FWt|ApX1?msKk|HI)MEmaHfa8c|!PWrjhV_+Y*cxaB3&=HkD +zHeRcE#ix}fkJ`NgYSY*8;j_yQU%){3-AosQRP``beLC0BHtzv_O2RV%nR~Rc5)-?$CB6Aizd6xUv*eqMmh$8ag8?J=NwT7 +zc8hVM&m0R^gekQs79ssr+opaH=MI5_U^=&9d%5Qvtq@<-ka9s|6W~oM2ewwiH>**t +zbu{PFA8yM^cc;Vk;w=@r4~(he9_{U)2g~KOYlZfFMs9hut!TpXPxHWMK96)(Ac|j2 +zs{cCMg>-lT?WUcO}`#W8x<=GND>+%nMMc>E! +zRo}6-cDJuwdJ2i~nP@dX`1;oOR{Lk-*uOW;%(5S#w3F)L>v{Ax-2@g>My`dhb#dCM +z$NfuhHvX&ZW25x>#p!{)6CK-3(kuJH$9?#WC*?Kk_sC6jMX7+C7qo~UDsDf3X2`&`}l6x*xkCOWj|>B{@M-F)WT +z7UkW292J#J^6QpUv)sDhem%g}OP``memWfa0G_XJV!{Mzod;V_P;0*9aS!hFDaHab +za|DE;HP(80ttIA}Foo((2xP3lTi2fy-$i{6M$Z|_d`_>ow!zXjYzxy}nG&<~(}Nk> +ztvK+?-I50~kszcsHdDr^mAO&v>^ilruijun5c+)ax~=h!lf?iG+ImLb@CQs`^p28D +z=#}ul-dLkotPu)Z&#cp9ztXhVr^_9^lDHz4k&BeAF>^^ey9aX$?d%hSQJXv$WvTP_ +zvXG_oRWVuL4RHUuLCpHD_-+)v>4AIO-c8CL;12_;j{ +z8a3^O1%*tgN1ANdf~cl@JAn|suA90~y4^qe{waoU0)KBq^X=;6(>q|rq2RUQPEhj9 +zj;rRElnO!~bcbm^jYZu@nNzPX4`+gG-LBnGsnXy3TGYrI^-+#%RX?=xGYx1g$xv!B +z&>yDpI>K`T)vCgc)!)s$N(Yc5j50etMC^v1w&KlBZJRG&*>dX$bDg!1?bf$W4I6~I +zaQj^s?b~VYi64NYtZ*i3FJ>s-t`&7z`h4&vfO?8UGbneu>cZwt;xs>oH@F5lR8wg# +z&8~l~N827uG#W`z`z*Kz(SNwfUA8Rz_{Q%Cm`Zy;#hQw6usv?J=7%1utXYv1ArE5P +z5IHF5&ICr?`ZJvekomf!T@$P)G{?@mlWzq08xIRZ3#s8~p(u_`vq+w0xZlMu#=KkNx_T +zQI+?0{eI$O>zntLBsjT<+CPmqby1biy2+kqf>$)k*)@67u72jH|D)f6oBOz-+xWzzkK3rg)$}9uL7d6R)Sl5c7mb6#TZXva0=!= +z2qQz?wS^!iUyGaUt +z2ZLq1fF+Jkebd7GOKPqRc$#3LVf8d?etBZd*HmR{TS(YN@4w-}3!>8@A7f8jjL#)W +z?*dC4qW9P+_%yVAkJ4oV>BsA5EBc}~=k1>mAhO&pRO^~wV!)q6jdcb5IhnmNDhVKE +zIEV&{J@oaOc8dgqYXu#eOulLHbH*(>a^Ub4j=IjzSx*4I@V0-e#Q#*&0uHMZ3!s&x +zpYIN>=9YGTuVVNhk2*Gu0mfWt?WuQ|R-3W6Q4wV|I0@dg5y&kualHJGrw`jqJglTW +z*tl=DgloHAJ^GtE_SdJw`yYa!VCi;(%v7P+rdNi#^9GL!+JlLCk`cl&q~bQD8<@kP +zRbf5R!9R;0=q~e;$txy+32S1@{`bFpEC#_l8OMMWHr$qj*eaGWas?^(-@g0JFIf8J +zFlBjsnliCK2sDivhv9#jhKEm+(_xSh?g0y@*f +z_+O~7L81L2puLIir58Yc|sst0jA?7 +zbA-=0T0$G|q| +z0rq75!n5>;DasONG@7skV*IPfB)%&3h`BAT;{S7{@o`76~Caw0dTRr}VK*zZ|Aa5TaZ=>4T0*N2U0CL$ZK! 
+zG?g0*h-iY?Fm-hDplPynI0g)lt>-b{c+h5f#AicEo>}-mMF~rOl#hgX(xRnaT8fkizuoN&)EWj|IHO$^ua +zO35BdX>cU`ge7CT(kK!xY&dl?i|I%V+gh5wGM&8M3Q-p +zcbKP1igrLwd1-uW*KeSMOIuAe@u43<^o3#8jAO1tXR^*rp8Z0KRFIbLn6;mmI1Wqb +zo@YFek{&uS+gA+oZ5`v|o!Qp{D8r>}?XE-uDzgB%!J0>wnGlZ=b8{UFAl&r*l8lOO +z#c@Q&a(zL|Z~@&27G}tM-4HDf$ZPhRYMT4R?=*Js^TM_Yiazm#cS>}gnyl2xgpc$Gmjdk +zH%wm>VvDK_t}{K8t4s;|UDzg#gdj7@(S}rm?!2YpbWdTfCFh!lIpwx9G03cV_4tY^ +zPZg%JMpzE>{Oj2{I4$atM>lpi68y?2URnh2KvK8(#z8Y#hj~ViW7N47W1d5He3gd6 +zVPbqhBcW;}%=Sd_=Q#O7ak>Sk;FmM;xOr)UrT}j$nACt%vzWFW +zH|u;;GLow(xW<&0;qj&FoFc6gd21)=iqcXcx~yub3@RxHW+bMMvNML$dyKF3nMY)q +zQ>t@FS6$e3qv>>NITL;(zO#Iw^O`W5@^J+I$&0%R^iVPSWjV(Uc@h`mV`ui_4DWjm +zf4}JV;Lt6hVb(!Ti6@dyDx(}-A}!8_5n7QC5Nf`qvODqYFPwrmT7(lCxBe{6qmSeb +zF66#+W{E>`FS4%PA>_^=D&;IFn%V?}c9eWBDH9gOUD1@md|R{E3!EQ`KV(k1IU2F^ +z05x_gjqZhy47~k4fU-9-7gK({mP32wLVT`8v$lv3xYU+MvT-xv8!?&Zy|{qpN&+gq +zSS)hcM;@$U?aU?ojwSNLkdbx1-RbooP# +zdX}N^kRKG7EbcKmX#KARpX^zWJL@I4?F41rUG>KPCGteRw?+I_3yQoG?bU3m!wBUN +zDxtEwAx(6vs#B!2e8&;T-3R6X^e6H_SgJ8EgfGf0~fpE^AfXY>UN6yI!Yap&POqgFfS)Ww{J +z(#9k$>hzX`c5Q-UIptLszjjHqM_g%phVk)4m92&XL7SG!y7uKUZxbhrW@sL!;ccD#sK61H9)YW>b@KD|(TZ%E*83PHukI_`|N`W?--b-YjUuC0Q# +zczD&jqbuo}_lL{)w+u@R$4Ivvqv*IYqH!{8scBDSJ2$IwOZWX#enkKY`ooFga3vBb +zXBPr0UXbFvw!((SHoZp)zN6JttvEZ&q$ABqkC#dQ%Z$MI@Sw-7eew9MZq)~tio~YW +zeSV;ioev$0u0D>1Ypg6?g^y$ojHdhR(5_mxxCV(_wL5_euw%`Q&*$#41Pv6@t&84S +z>Ha)=X@`t)<;RAulO~a8%GJ67d9XO>V|OS*BN)sIHS!DD==XRHKcu1JF_L>}K>YZw +z>t(1x&lmmBj-Jq8Hz92mj{z~y>#4^Rbs-7{y>@(LZ_wj-6U0bDC=hzm^XvT?`}lrP +zFp?WQUUB7@T*ypNu-*LQiK(lmn{*(Z6`J?qbyO +zi*g73{JCOeuOI)oxLyv*K=nsCh!wZ_!4vz*9b)Zue#-CrNqhTsDY7_F|0YVHn&QP6 +zUyw%2Q#sI6-Jd@djPw19edNwe$Uit9)akI+@a9=Sy5k)IJvDW~r|#UIh-bTwpL4!- +z&Xschu;O#~pJz|+eQtOBc^CEh(Ob_?CZ0Q?c>eUy^Jmtb4>Iw+5aFv|_rFp|`fqSp +zHU8r6&lj!h#_n5lq*G`#C0FH~!RW{Mo7TLCW}W{`lzovHu+O +z{0~EfvOxZ}>@8E4v(GJA(4F$=nxmg&WitcAm6dBG^9*@Y_7QvSqQ5pjfz@wXna3gK +zcAGdNGJH_D@}so-jEzd|yI`Bzl{NyAXml$0w%ayw$F{67Hwg3r$~#Erf?JV;RnOiS +z=e0gZA#LWvo)@=o+B}S6qQ{LdB&|g|%`;%n3N8X@haLJD`F?$jGs=hewAK9kD1elL +z^I9KZe=0pxTJx!Kf(C!CPX1tW)zaiKOi|{T@<#MKcib)YtgUp|fw5}`JFPBpZ51!C +zVxi4hTDJ<@klsDYHGRqlhugj!+#+z7fsuP$zdn=X(A!W4p4dIm +zjs1GW4xxC|HM|$K8v~YmcV6+&fn)xvdsNRRU)ohi5`fD_wy*<$+qK|07D +z#hJKu1ZtMP)X{Tg<>1#Uov~`#q<)LmoZ?sW4^k7wX6qP~zT3VdhThm}sOo0iQ_<9) +zu*&UC*P_!8X`}6O&E`Z|{0G~{+h6wAAH;&J5#_J9n?i>-wLSpf^{WLnXZ(+3ViwLzavZlcBm$IoC5C)=9U(I&Lz0^7ong +zLG+}gyc4=>Lb{xho7P#2?6h_T?Ak;CA@^XI1_%gJ0XxAcZXPzDasxSp$&K$^52GHc008r>`?ZO$1f@=8iLSTPV{r?#oF~^Yo&F>>YaJLN{_2= +zS`Uh?Pmg}0qfG4Al<{4ztZ#>!@#z{bOKca36R<2%%-RprD~Fir4^TS5hN+ma6^`X2 +zEaCS1HLk%Ga7EGEEO|iao9a!*t(QO5wWK#m_gxBBa`L2atOGF;_Ci6{T%3%j)4slp +z39y04UT5me5f&5!zezP*beTY`1Nz!30wcT83HAns^2i4@);lg2!L;CO^=6#fH@|9o +zdQbDQ%wH&b#ZXXtiH)7MDf8{$7!#Pba8jKWQs%Mwk9HVgB5SJJ4~a!V_lVBjQnQ{! 
+z2*EVRN*Ux)kv%9RZs|FlsG&POB=D;ZSMyO;e4~DTn+;9JNl$jl +zQe(DxWH{vVcfGgL>$oSfbuLt0I8!hcuuGNtbLC)y=goFMR0X$gB#;S>z&L^*4T4m# +z7^rt^POw=h$l}?As=2rg5pl8SZ737U6xt|-&eL9c{ohmV{BI3<{!N5(rS1O=dVUw1 +zeh+$9i%tI!A)wf_ep%XyQ>s4i->US7_zWz_hnkww_D0`gvD?kJIu-E!8hbB3atD0B +zw|m}hKkW6`C!#v5^W@u&CYYWx`ZYP4KEAsd#FgHQ5npmAEq%={X7`=1yP7_d;AdQO +z28bHVSUfXdt9rf_wo^dgaoz~qP7C`sN>N0>fG^nK>XV1 +zN89(hrl0Raf|rd_L%uh@I0e%3B#kZHUHG$07mZi`G3-4bepmbD@5N91c4;GnDjT!^ +z$K+6wY=`N~cNnLpmz$p~+&WkmFK%B(d2ou0k?9#q)$?#bOqN$2=SYT>?-fEDtK +z+v~uO4qL6fY5IHVwxjNkqM_z*r~Zb#PosQ&vt$44`2O#kaOZvoN4KnztVpG~zm_k+ +zR?U@raG)ZFn!XSCpN)Pei(EJzg#krNS>!K{(asamc_q<5OrzGKDJ5=1RkUo12Mw43 +zO_xxXTFHS6v>=$`d&g)ZE^@mhTniWdt%R1EML0x_N*szd_a~e-pr8K_Y7|2K4{D^$ +zCj3cds7dH0A+!%!v^|38eTD=aJH}8NLj$HiZgi<%)OiBFM+3gFKs&9$_>cu7dN8^} +z7+5F5DJ*E&fVLjaDnZHOO=W;&kO9uq=64k3NLLhg#ffEY$Rs#LGd1EUC4`Iqcnpr7 +zNuYE)M*9uNRW8Jh4#i%RM24{#;p~{`ELI6Gn$sDb$Bv-^?1;+z7dzsIEAgUBB~eR4 +zPKY6lIKd%_2pie4a|_{T3}{8eF;65>y;%wo=!6gTk+%);(OGd39&w%$WjH3@L70fa +zCemPVpih&w@wbdsOck&h5D;vzi!(AERXY^(stX`);io)fRHeyNl5lY=;#O{<4<{urX^~~ui_&r8%Qk0vtgB;+<|Fs2pVoBP`)8$>7(hY +z#0aQ!d?=QJS#e9F8gf72V?x-e)g`I=U8z`jT7X2^PCzl&OoMAi0uTRyg2VdlPJkb6 +z1mU|u>7|`9+u@3KGb~L6CD18)mK8H>NXmwBJ-gCIr_=NW8AY8LTSgdP{nC5=q0Vuc +zbbe+?S7ug;vIMB;fXG(5$}NNpb_uwXO_QAo=ZO+zXA*u)L$~>7Vhw>@Vp_6Of<89x +z!w@7G&Ptw%s)4hOBvBU-EYs}BUBV3q2qjT0uGU%9uTy*A34fZath-{DSodJ18ETtI2s+T!Y9NxbLH3 +zp~9oK$E6(zpz8qbmc=xzXW}VBrjce|hiCqUPRgVp;f5$TWhU1jk@qEzaS8-WI(xqk*X5F~~g8U*=M%zsGfB`=`=&G*wG{mdTLkfmed`o2 +z87@LD7JV%N|FZ$=!Xek7C}j4vGyXY-#-Mb~JhWD1p3$}V#oQgpLX1|PQW<3`fNo&U +z-O`wLTv`z9oUF-)tge41;d0#TmWJ`08)+b8lC?R%%)f9EOKIa4dvF+`X6a#z{FJyX +z%f^CU0`mPx(Q!_+7?hRO83PcfgFe@NO~7#rd1CBMixtnCMSdLrN4!&AsfM$e0dPU0 +zMoB;yYh9NhO@sa*pr{vHoHbO^+f_6G%Ly_SoXp|Nn}ak)3a=SwMw%CG@h=@*2p1TW +zaz+#K%vqr!$swY0Z_T`hxXc@xX(gqU_Uw|cMkM>ugbsE>joI}*OKT>wNqF-cWnMS5 +zU6RWBls63eS-8M#(HRM9J~DAl;tV16rIMEn@R$EoCcauKTV+YK&d +ziH^6=nq9+|7KZpktI8I;MzZo$Bi@a@V1z7bbb4Xeq0RM;e76J0Nsd0)NPFVtGfytkkw$E_OP2aVDFircWSXU}f+K#-1*h6m=z6?q{Y%GEDv4-SRC+%HM +z?7{5BtuGR|P2^OId}0HcWK^8Hc&pwhXJ33p4;w}xLG?!d07PPiA;tKThEq~uH7qGa +zqTJa;hPU2onaOHiX>71}A_)N9Ah&2^Kn_$j=XYn9l}gtv*n4VGo&V`6SZIc +zlmFz!Hf@Q*ktsF_X>Asb^ZK$bT}%gEGJuSBf#AM=BCF<>WG&g3xh3k_k=TSg54YU= +z(OI|MABtSDti2prTA-c1&$Xo1^Wn#w?927xa&FX6vYJQz(i +zHC@xqKJQTz~G8;YoC>Y%!hYRY)X2+l0)p~dkux*tJDY+ +zepF*^Y^*=0!+&7#QtnjH@bQ2i9MX^9U+`UFbg(}?R!(=@)BFyFC%#XC3o+yIfa~I? +z`YYQm8Lb;gj<~qTD!?#tAh!ds?F5*jwOB1NuJ|bPpmYrsR*|E>9g_ +zYv+63Y0&Rz1T?bF@5H4Gmj<67{{GxkfAFBgAn>89)zAOug}`kWF1;TNUN_{@@jSR? 
+zFl^mmpw)$Fb$^Eb`TxtsrvH7_&i^SQ)G|IcKK^pu&+&EP|K)7U +z%Xi~1Km2_8^glO5|NkaNhPLuPXJfQBFt7T1y&|_h*qY`4^Z?4l5;+LLn$W;&b;dCF +zb;u`G+rc@ZciMC<0rQHHQTod2Xz1A$TSag&=Q-BIM8$5KjwOZ;Ln+8~S8;cT#G%wA +zF2~F`^FZEODZRf{2Zf1$XL1#%!ra?uV-Hq{@Bzue_F}9n>fYJt(f70rt6cu`9aLzK +zGQa}Xl+9`{E;r3#sWgi(ddHLvBcLjDZF@}rZTN~qbCSGIyL*WH)UfwM#DTB7XmUPPWy3 +z)%x>!e`2rr;F6%Sp1V4AbPW=}_P(Ug?LPPgIx(kWtC7eY^tVlYkBE5oWsmD7(Hh6K +zZ%!V;RXD`w)eUYwx)OT8EVw=l2YTyp?Saf(g-lyNsiftQE{Tj1xv1$>+rtrRZVrg6 +zyFV;eJs@CQP&2Mkst?C%jcvIG^6?;KE)|852gHVlJpXX +zO!2(3&&dP|j)A>@@E2YG7;o1-NBv&~{XMN@4MR9@|wbR5ijzEEYG4OsUio$9Kfw?9YNYIOWOyoP{- +z);R4#hVqfG=g-P^BJkLq)auxK)`%kA2 +zZN;HZs&vK8i{;N*ZfdfdkTveQouUGERJAg6j<|IWaSWuRzX4zyC+WxY&wI%1?PuC1 +zsOqJ|&&l{3I;#MuTE(Orwn|*M>DHYYqsDDAT~)hI?X^?(8HO6r;-H&0Q4t+Ma1ea) +zfRD|6nL*+C-P&hmjS1Gtq+&kcLp^Z&(x19pfUVS^^#RQ1ixD|kh+JrRUyK`05dkW( +zu;Q*j3{UDoate8tqk +zqk5OHHps6H_3jxy3QAaOMP>-xXnls>WGmHZO5B(8sTMqkvr)lYu%gDVmVebBGaN`Vc-iDnUHrw4QpCPhSq|e9DRtnTN +zIp?pJ#LCjN+jqeO(U7vsTT(9{Z3Bnl;1o#eWJ>J^v7+e+p5n#K^O+ +zI`fu|OazyvWZ@$t)8RU`bK~otRXOtzdFr>EIV56N?zH7J7B~S5>${rsvB%F8&_YKJ +zUM++joWjZHh$(x+3RLp_?S~ehB-GVhAL#)(t;tdR9rU8LmHsQ}MOKjmJk21%p*>}^ +zD$enyrRT!fZ)>SVvnO6G4VLYbK~S2Gcx(#9mdXP{!aougd_m}(%lnu;=#SQTVaDgOa-67U$Hof7KiVk*qibz%Zu2q2|-h%6Q$M#+SB04eR}V^$mA9m+{`d$FyY +zsxqEl#d7rAK}R^YLrIZ?HR(^5S(>X#zhlakGj=8xRDa+wyCH-8Y?`fn6`4nSE;Jfg8Z#Jk~z$v)6uT>55n +zlnkWS!{{3v*V+*BT}5qd1L4WC(V;d73O@0Xs@yyw`;m#u)BDIi-27^7G!m=u>l0T4 +zq@#3C`pDzg>Jix+(;Xnd>HINM+3MBm3MpjYXG|+ZV!valW#Q*n#t;miX99Xu^YSw( +z82nBbINBQi^*!TotXzch;-(+#r5u*SRNM~2liTYT2|Ebm4XxinqJR9)PFEr<(UYG` +zc#xGZGRkxx^!lYN{12&2F!}3_^7swjf?AOq=P5;g)Pshg?S{0S*qB{tTBs9ET@ZPE +zIMQK0@(T-OBBieY7i#saTb)cLEO7^%0o<$|UGZmM0xHLB8W$cFWeECblGJOyDN%m}lHak4OO_vQ&c|+J#q#vn!-gfSkERLm@OIKA|KkP7*I`2+Jvn +zDK}(S3S#OE*|h*wLdE`zD&g>kK$e$Hy~E?RLcgVvJU0}zLqL9rjV^Rd +z{)I>Wz(&)BN%8`^x*_~dJzHmnN)*Rqj2NGtQr4iOPheAe&`c#ZXoC@Z+dQXg9s!78 +z^H<<%Dj*I)9H~FTHAFCEP|-`6lxctFUxZ|?DQRaz%4G?4y=J;@9Cf2nh8B)x5SPj` +zfV(UbyagHLp$rBvrs_`E0w?|hmdt)v$rGs&m!xU09T^y-xGx&AS|ibxU66yGY)p1y +zy8(%6#HO1?y%42@&c{X=vAx*T^sa;~H1%KuwL64Y#Zm$qNJG8(U +z+Arw@LNRSIC&q|1X+)6l;}$@xb!_}X)OXMHrOwnENh;hk^N+>YD*|4BH+c+^94z6E +z2~t%NiYEe~gWcq}lH`%atWAsb?une-X+@wy^%Phnq2wZ_3suXaQ_T2tMj)nFc1Ill +zh*1pF5cXz;{tC9mREx= +zw8a)?dKKn*cw5hAg;A{UavDtKWNB5ed +zmjCrR0~wJRdN@#yREBRs@X@mcD}>^8#;jx)a!O;0Edo;Zhqf3=K|D{vheNHJho6}!-d|PtvjOiePI;=8^g)X_sFgHnod0?> +z^5zKhugF`S=FDU8)G_mDPkzy%(tNNqKX!?KWaj!84+R%7eDf@T7xS!$OeVWZ3oYc< +z3nxVa2SldEBl6ZchR$Ph^-9Ujx`3jr6UB9!QI$Ex7LUr)mU6aIVE}GgTwGJ=jAyU` +zpIkHxN%CE~p`yu$y@++zOyHcjgXYv|iIOcycZ~e26mrUw%Bt{YwYE~?Dq#|z+Gh<3 +zF@sE+3KWs${&T#joUF4K;~3_5KKxJYz4upB|D*01l0ZTaO}a)v2}MOkx*8B6fFO!W +z^&?h7Q9%R}LP-M>Y6OJPLy=yBC>;%m^rj#LMWv`T3yRV;cdOrX=G<9x*W8)4=H6Ln +zo&6{5wSRc;*ZX;15A06r!o%}jg)^|E)7>RA`s^dLip8P|1#X#~HPEku8xN(a2uIht +z*Ig?kRW}kC-jolT)pJG_3z`oVo9fQF=9=QloHweD6xa84)>}BS1IFtdg3}AUe+Q9# +zH2zgReKXm}E)str91-J55|4>(biH>kCf-)AaSC%UDW)lQEY>dO{z}pP1pS&W9;{Nd +ziO^q`u+R|pNsxlGZ?I$}p-tm}NRpFP#v8A++RgO2v>K-&s!17XB)AD-+;}#+g}=CE +zarFVnCR(xg;hGI6zo^D#uJD%*-wjuW=@jHF5Ds#0{?b@IBTS$gSM0Y55wOk~?ky{g +z$tAw50hNTqKT_Tpk@<<@oC09M9b@z%*NKn(AQ-ome=BIN6(z$Yyg?qhWSMrd%?`fV+?o)estDJmG^($(N$K58E^wh2N8%mk#1R)O(&<<|b +zF^M3)BL0JnUKItLu2cGK46%G67vD^{rAc#K&(|T8?3$>#Z`0RJh*-4=d7qJuhd-5> +zPyN0jI_wzAER*JK^987qry&Gc6mYdOKxsC>T{du^W#C-qEh)rpTiL(^_I|Tl1Ik69 +z#gKpli~!-)0lA`E7di(n%?^O#{JxzSI^i)0JMVwCZg8s{^kHHMUNj)oe_P=7klxs! 
+zkSM$z0ncIwUSAD7@zpc;@A5cX2*5?lyM3@6?neb>Fe>>fV&+6(JGtbg8{ +z!x}zl>wV4SGk)D;2 +zAfoVU%FWHun-0&e{#_sMLU;zQcqG+c^Huh^b$HC{+?ZeF7@=^?w_^-@&NFO}2l4ea +z%K2*%*SS%9xbzn8<-jqVEq_|AXIAai|Ixzl7 +z-Dr8c_4@6vpKn3iC&5Q1q5n({{ufgE&xn+YT-ExX#G8aS9zQ0aQ=V%fzK!O9a+xV- +zb-@gw`)&3q8TgvPVI~UO_&?lUj!-ZeVRs7VN&6lXLX2EIhFHb%dSv)NPqGE1>VUOx +zs1dqa5Gtz67K37R8D@K;yS!usXJ}GHWkZG(Snu(Ax^tY-N);YjE`TRc9`-J>DvR< +zB%yMt*gQ>1f2d7)ulZ~Y5Z2K#UABX<51K0)svW3++Hi+pY8q8;-$$hhEGwC;z1OAq +zqnCVb;DYs29LOlhxRdSHY5Ud4o-q^6)#IOp!&WAEj5yfJeeKmrAY%fS37$MJC6)R> +zNPmjqC}Ii@MA;>6Z^1o7zsq!(Ad0#6U2ZtR8u5MdA*)Et3StkH7?|5%EoHu|xNN|% +zZ>J8}_{8qwDS^6Le=(A*pi{N+i5+ZG++?fxK$X~8rf;)NvhG*VQoqz^Q$9H#olu+i +z&?zEp{MZ|gxI1*|u+0yjDGR~(Qt?p9vu}GJgws0>5blbs$I{g{%JIxB`Mbhk~^jnkCY~cTpbxR+KwRaESFHzg_Czr +z%5~nnND;kWuZsETOV`I%$L?Yf6;Mb_f{qGw%gJ)b@(U15%`7hNU6+6kC_>%tR)9E@ +zA9bMn4t@y@i<+2TDrO6uzSS-yh~R_w8XVQvetuU$5H00KtU8&(l8nT__8xp5r+~gl +z3)OCgkU^A-m)esFN*(Hsj&Qwp(w%_^@R~& +zT*{^=dE08>d{WayL6G~wB1w(8T_G;5!G}QLaU2mc!65FQAj8hN +zo9j~r&&HPTl+)QGV}0b{2n +z2^_S{M@ED_`&bpy$`X-+91Ni&U3Vc&Swx(pPeKrgAwukArkj@c9Wo^tp{Sk +zHdiy)Ft-wW3X-SOD#XX-IGnPPJZ#@8Onfz}DAY@WnybKT&$b`tR=d7QH&?vz?wF1AU>_Z{|iGqqZsYec$^f(iiRsLbfc#RfYWD?|uC*MM`x* +z2#ElUj{n~K0#wi$>wQ1|B`#u#(w>grhIgstU(O;uoGp7L+xUe)5!jIdU +zL5i<;{sB#s(go9`~MH=%JvhE2QYs64x8tg#E|#-$T}F3RpR8hQHJ +zk9BhLqvnoQ_|{H-o7+eMDBQzgK>`%zZ~9r4QGZn6A1x5J$`i^ub%^n-vc%sNBTzItQgDDXd9`fi1 +zYt=;f(mp~bg(FC+Km1wa3%+kPY}ps_Ye_vv#PHqJTKY}Wx6})34kBp8(9Px}Xhg(| +znSZ8hy*GafHcx@umg+OtB&W60gH(IZ|cHtS^kbL +zEuH;0!o&%C5+u~kfJztRw&6(BAR&1iMY)(_3}kYAiR1d@T{w~eicHo4Z8xAS!H7r@ +z-gYibic8FbQKKw_6^nzNumlSoD$rpw`cnit%|U7Xo>B%M2Efu#*bb)%5FFM$6k#$4 +zJ%Xc3pzz1m!q+Sb5L$%5T+~+>*v25Dd^Jp;2}95#x_QW`wV((xCfFPvT2d6Xsg8GI +zli&y%dJdoPr}5S=^9L_2Me@VxU)AYoxM(tpTpC5ooy7zGofF)Mp(ScKj@H-}12PB? +zEan@uBvk?%&9Y>BgBUVS=#)LZ(}Py)5hVhLpS1s%i8@@Jau64;kBh}{!(D0gAG7$u +zVrm15#&M$BvEy^F6lo@`4Ml6mQJ!1IG+4$j^6G5S|l$2$>){cP9{)DC(4u!==cq-`P@!gEa^f9`XD* +zAtl2^emZqGPySC{$v)adg0Gkc<1bDWU89ia!~v(FlTKU?CP^QYNbZUrf`ts(C+a(q +zv_0{+h*8p9lCyItrh%lkcGnn_w2vNqW;}K`unYYsu_S?z2CUR^Izc-y$>9e4zH9Ug +zUGx*{bP){cv_Y&Q{O(uW-Rb%$96j7(oo!Q +z2$5-zip+tFIC?sSTe+4#*fdko^-L&{(z*x>Ph44bYyzyk;W)=BmO_)rC;kz +z>H1{xjqFLooNn};b8D=9FNwDe^6<25Om}Xy2UWo+|EpuZK^p6<2JP)Q;osSki%ZOc +zAhCQsH%2bIOYRxXiAud-ikp6xWke=`8(X7KWQ~zX%L0$UAxo +zkc1dTzA^Uo1p?y*L#qX8^~|pxcj5&ju^TLwQHcdRRiJ_1wti1hP9#G!dx};zTvAl; +z%ZIMc+hZMK;hV*8RE}|?2u7DBDun-|L8BFH!AZLZ7H-VhfY_hUO1qPRY9|jBR$>?;Uy`aYqE)+(0R`O%$RJm}zS7lHe!c027 +z@+DzzBY(y-YFm2f?&v)9ROXO#)dz)esivrn;9!AGlB{rqWi)jNR54K+Lz*lGZDxGd +zXG?dL2JnI@?BMu?=NsS5UMhDzXuEeN&ANIDUedKz6T!S+*$sXgo!y`i +zerl4A>AC-TjXhhRHC`lcXq^0%OMLM1fes<0M;;;!lJ<8_&6BNeNqt}+e0O|-X|7NS +z>}#EFN(|%{{zTRM@=UR&RT1VMj;9vlf-A-?OE4&^Xnj()yfj+gFDi&(u*t#;MZ`8m +zU4VlHz3Ct61r97>E$7_Yji}3>QMbJ4p4}zVb0q{C`%y{mq)_Pf^%TE(211@?TFnE& +zP3DtKs^h|opfAesV>anL7ph}f?y03@w@n~FTHjgMkTxg&R=Cm0CN0}LyabDCuNL6iVa5GA6cw;Biydn6xH2?caLds +zbgrH0dNMxyM4+Lub5$@86K(m7{I}93*^EEj+bmii3J2ByTCd)zz%jCEA01~$ctu$? 
+zzi!bL*r +zE8(QQCmx^CCcILI4;v@dHCOSU$O_CP&`*$ZV`8!8`16yZ73G;Tt7%rPzGADeu%VQL +zI)VEbe%RDo$2^ww=}b$}$@@+}&;IdcR%rWBrU$hgOts^ZyJ`Uj-)(4eW=(=l*QS2c-s`dgfL2 +z7bYQm+OOT%2=u|SeSQh}46>eUo%bo=v4!^jeBPob1Sp0Y1Vr?IJ_oW##{I*!qt6X~ +zdSi7)%+C)W{rMct8ZrMgZ28)|-D2dao{;U&=LMh_2VdWGe(h6l&!_C}3or6@$i4QL +zC*fB-!$z^WqoX(aJ$#kNaC&2**T#Y($NbNY1;>em^m~M7VM(?g6j5%(VXvgZF?t0z +z(}LgFUbLjv3wv(-e<@Nr{Qnv$?N$4)THAj*h@-$=@a +zBlgFo)G@4MyN*vxJ~i4^J`92)kW(a$vsVz_XQaE5Orewy9EfoIo3SsR{ZL7a)6^s|V^k#oU%U`BnYQ6oq-xrrXZ>_}L%|&NV`%+oD0HkX;O6 +zQYUiOO$REebLwQb$;AW0ijUGIeS`qI)me?95o-qQ!zVbBuG*mQc7flgBj8p9_CWus +zDCIy1znbL03#0kg$i9;?pK1q0C(w3H*cn}@oH%-E<&C{QZ?a8~ZzOJ5=$$lM>@6)^ +z1YkiS$Tizx>>CbrVd9`b>hODQy~$NQ5(~3;XSdGl +zhDvnr7JZPtW+bjl68vDwK$*;jbP`(mJVF?wc$%vd|cO;THG3PVj1%W{p$I# +zpN)!!TOlnr(mGR-aLjGqijvKe`@q`1r+#GF`Lcb15c|nt(XGAvpPTP2uemUST%sIC +zk;C^q>(JiLFy^xm75s-qZbQ^3-eNqF7QM|H$P5(JviIFJgCOq~D{2)rLrWXVb;8cJ +zQEmEisi9@&h|Gs($q#a6?I;%qtlcR{Vtyn_kfX<2yJM;>>7#tq7rK*(lXUFMgYVOw +z9ymP|^L~G|8i<45m8X9E_D()QN%k8VD>ITNmK<~5Bo%hso?j~Vj@k(ZXyZ9;ulN}- +z@eqCcQsy5!I&sAlLE^qD|r`a;Nf6KE5#fDd7l_ZXq3a3Tp3jPeJxEHB&GV +zd1gAnGxNzVorhKlv8u2<&?}YUdz3v&tmN(0ZM%@Ihcy!8ls*%$T8<*<6%~nh#Nn-y +z0S$L{>P`vp6{vpWAPg>fL6vHVVAMSt)H@R?HZX!Q!m*(?&#OIexRVuY@OOgYt)kcy +z`S8(ds-{wOrfl^w@RI)V7<|(0MFwOTy8Yy|LR_wBJ +zukcY{qNJdUM36fucqM7P!(@o4FFh@=;tPUFqWN|D+QbU;=gh^3q;6-i(5{U!C2J6$ +ztO*NbBR4PMg@A4^&J}f*Gc#x}I!y;MQA(K6SGq3X_-Kx>-k}IFHD9o%feQRA>I_K5 +zKAzvFvRG~%4Q^(fARAuK)t$i#g}GC}Cw@R}na5SA9I74)EFjO~6XY!10gbfr%O^WS +zKCmBLF_jilX))i&K~PKKsSb0ox+ +z6R2D}MluMOD6Ze{2@Z@Vne&_`As0C2Ue246Qruj@Ggvbj9D||6#8{0qAXsQi+tNaZc?aVD?0wb_A{ +z4x$U*!y6*lq1wtSj`z*M4UqywU8Utt@6*hkB9DsAL_!#(7PF^HP?{-Bdndoi|pl=@Mc+0d3% +z0>44nFCxDnHUx=;i*bY9|B$2JtN%uh7@@o|oG+3H28D?_;p)|ikD|g0)I$&e9x4ve +zFbLal3`b5v^BgHNW1&A_I1?-mNbEqJD2q|V19JQ#PQ*A_eu-j&R6XUP2b8z__kNk8 +z!M~9sR_F;QviTf&AB})yiGQdgUV-D|WGSAwa0{l$=4_ZzeWcGE6Jl3S?ZJk +zIVx@raEriMbHPP(p(rdhN}V_-OZ|m}XVQXG9ZAWkXfQLn3=;&z1Wf<#(GLZx7?5x2 +zP=9pd9%G3QaWOfwbQ=S59x5)!G1`I~-8dUlh$UAs$?u}#?nZ?T8U&F%VyrNfX!mS^`1Gq3PF9lt;4EBE}s_1BSR~kiJv6 +z7*C$c$7TFdPuy)lIAj=hjTR|nK$~78?nuMwTG75oB^}m?-?b(PG~52gi4cMOa8a4G +zNQ;J;NEC&}BoFCCeO^sYu21@q3JXTXdwGUQbthux!Y(*74tR!0%O%fvq`XN@Is)(} +zAQ$D%7Y{6#`Nq0vge~ght(@YbyJNn{B{p@%HOj_|GDWKA!hA|n%}~T}EH%R^JO@L| +zXGXq6#V5-Xu-#NILkIxUG~WMiww*B`lekRNZhQ+WEm`i)2amK9a_~@W*!8rOC-b2> +z8u4{$k@*_&7L(LV8WG8;sNGJP{7exAPAoo33@A7L4IlaPio!tn$j`8FQ*N?JciN-* +zP(S%hI3~HWE3KEA90~(FfJ7Udv-7MNqfY5U@PuN)`0r^1!XHS~{s$7>w9Mx74C$K9 +z;lV@~8dAy($v5Y-GYvuh>lxqinH{qU1M<0&G&(FnX0JFpL0xvSGEu>`xBfMnwA?qp53vU +zZM%_6fWhmDBES1>L;34Pc^`t(li~R}9tF{9X`e0glHrWYT@*J@)<1&xzIi}{#_2ZW +z@v_19KCUNaj^7RaLyC;N1g`Nm$XHF1R}a<41%INHY3CBIt&{KnvZ%WAUi5sfc5#tM +zG=CH(H`=P8u`bQXlfFkNeSwK{59UKZV~IE71?1BxE=7lMMY-vDL-SF}8>K?r%;dF9 +zbWhM_XGW4W{zhro7B|*Pvph^6Vjz?~-H_-5Pg`tYP=#o*-I;kWgENE538|UM@X|$Q +z-Z`O&J4WQ*Qe3}gxUW~(haT!j%XoMx3uTqT-N0>FRtC?ePOp`78grftML4;aep@Z> +zaIC8J%&YDs2$oegb*Ftv$K72hZw0Ya)Qe6SguuG9-Uv!ZjR*SBN*iUX)-BoJ6)N|^ +z?|&1FfKT4vXTaac166DaR%g}US?H+-BU#?oEK^*GpLNZ6Pf5sZ`hNFANB!S-;M$ +zig2t0hO&F;bzX#r_d3~G+{)5XDLQ6rsLiq +znY#_8f#%>r7kajQ1);a5V7>+n#Mu+3bjM(cIT7VR8n@qHbjX$bT(h6B9DVN;{*CyqsRE)AiF9BLnG+YYlSy~K+zpz$idXxT0sG} +z`h50v0V2SlJWI$1*2`Z9PeKH^szWr9evR6<%q@F`JbJ54`XH9KT+{VAL>2}SgKwAE{!+Rrw*_4;F9oydS=k-u`CACV{qUV?_(2SS(ZPLe#+|CKEA +zsSXtQpy!QccrA>2VL5|8-Mr=nhWIMH(C8t4TQ4+mh_7FyfHNej>^11ik5C?hoPSo( +zAtGOKL%9E0{>qSGi`S<1^W2reT`gXN=w~vaLv4}5GDCddzFXW}(UUo2A@G6?Im7aZ +z7Vrq|z+PGzj@&(R*zbc(@Jcam0O6IojI +zl067M)O`!3>rSmy1v4KBs6gf@+D<>}z*+H}nZBB0<4G($XnH~NO9?G;Q@LBh0*4I17HU4 +zpvFGc$4z{n?ACBnCBrdDKEB_vqyZ!Uk*@+W7j$gSDA}_8+Ny5Q@}5W*O1qbf>Tj{F 
+zcK(9~SHTde_H9aw?VEcg7q4l9)s%unOodwJa=+MLS7U(np0I_#80|7Q@p~%j*6i1* +zJLcoxkGSIB22&M&*dfR$AKu%s_{Qh^GfYt5=Usu{m6BD?6o5>hpK%PWfiJqXQHRmL +z`H6$;p1>DC!;GZh#xtj>W>62Wys}K?n2P-iV4|0k74WTleTFwR?4Gz?ihMt!sVjfhGEib1kGTq6djZ{hZdehJ7 +zVqG}ROms%)qK%mZ8CK4SU?5B(w~U6%MOxeIDmmtcbI +zvP4l|zDIgil3H$;nz1lAJlSbMJ6>`MK$K&W(0y{WDk_cwp2k-VJ6jy+nvm0 +zxI69QeLxUB19j!YK(U0DpQYFh5@6}@qO4Q;rO&P@AKBj~CPJ)CWS~s>yx7g8O9ip@ +zWvmYFAze6*BT&Pl9^0>yut$66-AfMg&F`MxPifJudf<&+884 +z9a<{6T>iMF^*;J>A7uyc65$PB3XIEw+v`L?Qdojwm`O>&x#Mao)p5JAF$~=b+Oqj6 +za&*gLnF@v^9OixoCC!qwaU!D(CL?X!rlBncW_!$l2++jzHq%KGVuSu#bN~Ah1e<)kAaHUa>kX4Tg9%^j=2l+MZ;;TkPl8A +zla7mEA>=kSzG5>KDgz?SW`?VNb8labZ~z@N0S%=ZVIoOwMy +z@yi*nl|1wY*ZTtgIS6?k;-NE(?<0J!$>H_ANB>K8L^b?Bsg9mA2LQj(b>czLlu+4U +z5Omr#eYn`}hy0NJ0?<|y8pJ#-*9S5}^&6x4Kt||V|KbSUN7eZA!{D})qinHz8;1)WhU|?E) +zZD18@&xQwgq51v=g5YvKSgQc5UGs?Uis*u@{Z_&%He2SLq9Q9n{aa!|xLBNZow09Y(k-ak*#CLeUSx%F> +z_||kRsGDwon;dQ;s*+Q8w7{J%)(-5@`&u6cPu|$m2h=LzO-KLujedUoWx%fl<^1&< +z0Sz=e65uxiX3XFM@ach|I>2O9#AF<=ABE&MF;pW`R@gVdSZKZlDqFB}h#Mw$Qmab30@gx;}& +zPaq1P6Gv+$T>^tg+-2H8RHf5LH`}U|S8tvKFK6VepzF79T!)$ba10mHp4h%YcHiQy +z^oB)mbASBFj=ih9D=AB5&9sYpH<>lVLvUp;E=+Z=(o5Es3SFIBhnm5C*nm6XdyJTo||& +zNm?tyZ$kxbUn4DhkQ@+{T+1LhlAH+$ijZN8FtQO7w}B=v=;JJiFu-XFU>_mOAM7&; +zYeRyx*TM@tNULLH2s?Zh23F>hEjZvGj$}O=e(zk^yklhiYRKm((jEiKXGe;~5cwB_ +zvcVubO@<$pi@YocA7Tan-K>A2|9i7u98QskN6k7?ym$sU134N?3=#P`I(Igb=kXg$+lEAz#>jZb +z@OUoywK|;!1GAYV#%vg#LqCd%u1|%Xm5bYljXsJE?V{mIv9WJfqb(3I*j3ysZqP$a +z=zlj`j}Yc!>BZcL*K6UO+z1FZwq`CiCzT|FCGlwC-7LO)W0BJa@vhA1yY->BJqR0E +z@*Zs1fKyz*{_laaN*QShd&o(K;PM=!!ZAK6DB-9RK}|krla^q`Nc7WZoWq2Fh9xaJ +zG7jLwig~GwYk-kzj4=X)xO}6yb&2X$cV=Zp6p+y~9?&|cfAJp~q{tptGOvztel3D0 +z8@YEr=n9U!PcC-9C+Si{*pP3;=hO&C4fy%-z<=hF`eEb;s5`tU@~majtIlM=?-J1^ +z^=&970hC%JOFz%07+HlCG3b_pabhJvOM&^$F=5s*_Fz-FmT6)oLI47K@lHyD192e27-``Za_L}J?Co{RPh9f0c?KVyA#afQIV%0A +z25fAMmW`)mtkZks;ya3&+@SaX`IH*Nz-}b$4H9(N_mA8N5RbUWB6#>P^L5G|_Y5#g +z+)9wKkC}A4D}|#GYe&r9?MeT&6sJ)iyN4Uw?40GP9#=1$cyv5CjRBMS>o?K?RAE3R*St>Q_^c=}af=-P=96sq~`Djrmpj +z>F!o3+C8-6J$Z^7p>6;L%4Lac6oeVYwa6FWjSltq%5e!!!`GKwl8Y)|h{ne)2H4^of?Pl +zB~WxSaLA^@)pWAWCdqHs-Sthl*Rd#@`mApNemV(OLPuuGdsS2A$dydxuobGAhTI>kW%9K0RD8W3{$YqL!9ON4!G`UVQ3`i0oou`t +z<;bKcrYbB3zOk=MWZyAujoA|e^EZf>Z7j9)idr{p&S}UvW7I&QH>%Dx8bxQ%^DNVc +ztx|q8hK{8@P&KG7vuT;}YVn9^@rA+5@F&fuAi>7q!KS3Ui%r@uLw`hp8jP8DwD4ye +zX|VaS!^ZJ(8721t!F>kq0D-c(862~SJ8E3Xw?(>BCQe*xfG<$(yOPT+3x*t9Z${TX +zOKa6%Cs#I+K;bd9cwXU|%*|zx9pzDTIQ*ZiG3?ZW{&!8`8-$K65R`BVq0(YSH +z>G3qavm6-BRBoo)I!QlugcKkH6qt7fLjtETrB!2`9Zns7v$^tea3+sX{kgaPqT7>~ +zo1}@&V51O9&Srbd +z16|k>NG49thpsSLS#Q~!^Rh9#Ihfs>e{CbFfXn|z2hvy0-x^hiX}xhzL?le7;^5G2 +zouJ$Lskc`m{r2ey?jO5-fPL##U;mGlTjo*yyleg+VsD~!M9o11+{}K=*ud&a|CHP9 +z8IgeZef@JLgE~c^eW2SbZa0^-2bZ-W=IEi7Sii5CgNKWT>>Y<@o8gkOzW<_0?g3DS +z*r_Qj<1bB`yq;?T=#H)zuZXZBpXWykX!SqOYPG9jf06-FZ*d)|* +zG;mmF&+xGdPraYRhKJ!+S)M?Dt&jEmr0s~K`v@v-5o6e^C+| +zOJ2F^X@R|3J9_TJXoSF+-=495EXK}W8*_;qJ5xAzy<_au=x7*lyIOILV#{?byn2QW +zdBMWQrd(ktk0%};Pd@*@Hd=J(|9P|sEr|f_T!#umKs=B(2zb8pedhzgc_5yk5{Auc +zrNE^$-MZMhPofa2)|r^rK_U+>>cl +zQ`nzj;J+}@T~joegQf^6TR$j%mXAr&Jm2%6WVpya-#W|sVd;x9ry8%`?Ykf8)c&G4|!&Y}=;VPp|6z7vJT=xY~Fx;AN9W3f_PW<<Xv9^R|<1%0=b(;VKy9w=UCA +zc4xQdSl|BMolmsSbnCwQSku1eRjz;E!Thb|XF}m0zYU?k`shACIn;K->u +zlS4p;sJdVLkt*CDIb!&p+gFM`H=mocJNB);Z?g!BfQruuP?_; +zc8n8dM(SP@t3>K1A{0LdO-ESgdLG{@@#(aV6%BrW4M@+{Vv{gv!+~`!yunmfDR?j9^~K9S)#) +zF57Nd@)*+k`}4=?Hl+q=M +zIP5W{-FKpFM7kxi){gG7_^QeTsmOfwc&Y_JnnNhgwy80oV%8S3on$K4$$WGmTOv-0 +zFJCSKYhlpwEHYqdtYrnW55MlG><=$Mjvwzb3ucKG;_{UbN=7R%RklxSQiXS0bst&E +z7iWHi)ZSQv*!?29j5fAD$oo-#`arJ$z&%tB<;6Lh=gIoGLuBI75tzPja6;F;z25uF 
+zoLhv>?euNeKKHQHd1R8=B|<(xv8V7=N~#Lmx5EnMqYTPhPjUOq@(}mn$K*4g5-wO@;OYDMrhO;-m7okN +zWc{MlXTv%l;#Q514x@uAYo>=`Ia?SP}5aTh=B*nfH4Wo@ILWKW@S=i9qE3lB7EX +zVDM|YVJNQhb`gPJR~v{F9pcD~aAN0!$C?~LW`Ug<6Q-c4rn!jEGECDqD_*jq^R{1P +z&Y23W#@97`J^n?e)cftnY?Acr{(U&hXKz1>f#sHE*te@dvEa+AB4`YooF5aTvzGT~ +z5*~y2?S)Kl>1}0lPtAd?@N2(K+02duOsK?e;w4#NJMDM%YPa8v%3GWy +zFoKg1-uF%#hL;dxRNpB&@$R^GL-z>_z!k0Yes3?ZG{(8lJf$P5f@tVHKJYdE>BJSe +z{4`#Vx&48RaHV&$=Ii~I=S&qUO+2;%)hkWLfowl*$Um{19jDpmuuEu=zX31_532q% +z3C~j9w#VQppn{&aBWw(_K77{@K*9l=&q%S|!%_!Pu=d*J;oSIu;zRvlhpLUyl2I5O +zDgk%u-5AR|a$M5a*5|A@XN>df2f#<7E*TpOcG(4%pOk>B8)R^S7FOZC3ZHQI*3q$Z +zhv0}MuNztfp-HizQpz)L|6T-`)f?I9JYV(6&bKj3-YdvcG2JD7<8K=xcbPK~ic@q1)zKr9e}np_wl4jrCVG +zVwXN;BpEMEjNTT$Ax3te%Y2_Vy4Ft;TRG*r`9W=;ammHX8wm4@?{^*L*Zn8^mZ=D6 +z@%iX?c4(Ln`>_k8fBWW>IAW9_CKt9%r|}fZm%M%LqCn}W-kYR~`O=#q6G5w+8nt2m +zRVTB*DJ(UBy6l!8=KoygY=4NX{}njWn?C9zzFK+kFEcOd#VVsm?)3E_2PrSKwW4`onNA3poLDDMo>Vyhkw>;z|~Zulx47=<8?inH@`X= +z#f4j<`OGBv0Pn2r>TPR`e-VeO(s|=B)7Mxo!U^uD?if)g5YVqr6;Y46f{7B*kMwhl +zJTi;lfeJotKt9U6bsT%c!ae*Dg75csm^~kZw|bkbu2Dw~Vr7Gb(owfO +zJLAgL6F8Bup}*l6dp_`_D40mmt&9IwOLXAUdmN))7UGMDQP*V|x}DKdMX@6WL@5|p +zPCiJX7DOcc(UOx$Ma@7?7~!ubv9#O!g7S))bo +zk*9N=;$%_uwXVc~wM139q^XWO?Q$YKnwPo(c^fnK+`QkvufG!+{-2!!Ej4ah%|{l_ +z#)??s`?-lO&J4GCA7d;3y9R!h*yOy~yZqhBQnHB%Oyd2Z1l2Wg{h#S~M-~V=A9)KI +zEGQ2QaV2@oM-HgR9hVRCm%qVqW&rB*kK0ls=lzESV$WcrIdj)9;=SPR#5FEBi75Pc +zH7sU{@Xax4e^8Q4y|0xV@oHWAw{y(2u7K0yAp<2h45Q+fR_-V|rkd!3vM?F_u)9(` +zk3_$^fEGMZsDfPd#Tueu9zl2@4RdDJf7mMWh5U`NwRn>3-9~3dtve)W@!@gG^3R?Z#G!vg%+2p!g0lg;s=+Y^xvIQ +zXq*n;gY*DZ`GOw}QM-D4)4Kf?+%vzOWAR(vJ78VW5nXb%uKfL2c_B{dS$){C9{*X* +zoAX}LDlT^_GzHyI(wpjqICp69diniM@7oJGHZDH4J^sk3A`T|t;&{Nfg-BRKmcw}7 +z_0>F+*@U_UA9JfXDH|V|vhqWM@F-2O4)ros4japBtb%f@3hVDLkCu^f1xWewn!>6& +z!K@cOX*T~oEvR4v1b>;HzlV#9q7`bR(;e{TV=3jQXS0yL +z_n!(xqg_IG_Eau>s0{2b+AmuowDHhZuJ(Iy?sn(me}cgCM3!+Xjxac5h-Z@AC@#cI`&eU$0gezl* +zaQUmTxIJ{xi5S0py#F4pfF*d2gJ+q_Y>@-KaX=t7eXJ3a_Mk-Icap|jwh_bU31E;* +z(Km4{=oyV$>l)#YgEJkh60GFXix&Lrco_jJ(f8Lh18SXvjI;uH3O8C2?9-ObpA_zA +z&50&oE8Y|+v!1$$1V-a0#Z{Mp7(AA;?P_p`N9f8W*T8F#5D)ch9N39#*J?QiwU@<0 +zI8r)Sz_A_&>^a*eF1Zwfg|u5mCt9@x0MRVx0J>GJ?~*E#sz&3?)(7AK1 +z{sOISApFTCSd0Z|9wF2G-u3LH@I|7qRINLjeO|2A4W81zz%N1>YVSQHBq{K;Y)RzA +ziKm_K-5gCW+Vnl`{?YE#EF@a{^ir(Ah0Jyr?T+(q9fJY<`NWP(eInl4?jm|mZ}_-L +z{K?SB6<$=bkikE63_au;Bjb91GNLQ%UKi!6Q|yE@<9!!Nrkj4eJN;@m&|i)??wr!+ +zlKHVaN2!N(uRH#DPt;l07ev>_0LRL$9=1~NeUsh?ZoRc3z4e*Bjm^Ey6TLu6h@;fk +zX42R0*4Giz*Ol4V)7;lL(KoQwH>A}6+@ycRt$#G6A3#JeoBJmw`rmBzPbv*en+&{j +z8~6}100^=dng>2k41C@iSW+7NW-|EQZEz)Ia4mCiqj_*^V({12AV_%#e0T_Yen=p6 +z2%a@0)G{RUdPwZ&5JLHx-A1YCF}UM`Yc%>@6XzNX>=ff)p>W5}=AqQApqZ$zM0^n_5-e@ +z)}DvQoHIVvHQeaEZvTety|-dIe$%00Ha(-V@o@F4&vjqgnv$BAFS%B=7*1t;soDHS +zNP5guP-JrsNw~3VTkK+V?1c6y-FPCtC|%uNx82R}gzROvvJS<^Bh{TXWTjg+s!2ZI +zy0z|o{Mw^k^zJ~1PG`vTK1m=$V`Z?fe0f0ihuhL1I!f*PV{_S`&z`OCuDES^VsP>M +zu;TI8pGVGkT=~)9J(@vl+?O15%lHf>SWZuBLZ~p#h>eQMOD8xKGuEtwYk)}kGJ)G +z>?ZH$+H4i?*VgJ6;0qLCfnXR8SYe3>Kk?t#d(W<>A8uVY2_b~gdliH9qGF?p(g_Gi +z?+7T6h=}xr5=cS|y+cB8QWXJ_4hB#N3QF%NC`z-S(m^@#|2$`{z0TY7V(-1i`T`jl +zNk+zR-t(TzQXr~Zc=p6%Fs)?*v)>}{4;4d{s6DJ9Taj?)5~x0+mos-OO4_Id +zW<%}eZQqJkNGxGSBKid9wqlfrN?6gWwi}QeDLR{>!Ld+JA(fQ +zaRS%;7vf}zEu5Vq1t#g+zd{cQ-5xLzPM7jG^oU*;Cn~UHm2Ou^>jGi7 +z*vAWZ12|<7k;cA;mg%LmDH>IMGe?zCpwp#FS3(w6kv8%098dX~Bu`7LSLU@Bv!M2i +z8zy-hlSMVf6|OI@ +zHR!YAUIW~x7Z4)1I8<0lKYi-H$Ysjnw_Cg1g3$A*j-@1{t=nKnfEQq+ft!oH7*ab^ +zbx-WI-LINmZd=pyC`~wY{Lto^1-k-jM|7SpRH+O0?tU;^zT>i~A#}nQAM_rfm+!x!Q-us>G-3kwWUm$R#mO{V^n&w6>c#*3>_ID!#sDp$L +z-%J!?60A?)%|{=GYe+9aKE4^oaexF|HH~u)KD^xM_n0+P52Ng 
+[GIT binary patch payload omitted: base85-encoded literal data for the binary files added by this patch; the encoded blob is not human-readable and is elided here]
+z4A`e$i%T8SCvb@ic!Mm=DQJuOt01r;j_IyaJBc&>HqsK`rQ%5h(Q6inc}LMxjljxe +zH~n#BjrZ^wGw6qw%N^?!D2Emx^E=z0{w04HC0iipjlBSBwuUUq4Eal9j%|^`X&rQc +zK%lg|Z_`AmgfYQp&MaKYKuC~e-Ue$DkE+beR0ItcpjP(hf8z{+q}exk{vvfDTmX)twZ`+&3QiKr1!dbm@!a-LVdQ(lHSP +zvh_81EA1Lo39t{+uh}9oM(aH}Et;2VBO9^CwNDbmwvS=S+v;Baz*-&F(sl#ulEO6y +z9Xq_6h3guJRO_6pb#?f8u&Ln^6w!|E0W(w{zG{qVm;xQH*iGGHV~X~~f+cW_jU!ik +zke-GS+hQ56+WX&3OgQe}Iwp{;6@s_dZIjx04lKdN-Io*tqi<1#6{><^^Yqus<};?Iqazl7wNAwq9~E-AqV_9n{q*F|GXz>Q9&=UQh5aI*^} +zKYFrSp1(va^Lw`4_VJFHoc;G3JoreC*?I9F +z(-Y^?f1RF)LC1g_okLKi3+=Cf8=ZTgHi*)qg%F~=(&mJP)8z++J7Zr9Fp-N`db?0; +zd09-ru8u_05LlbB4r`lZ!8)y1eueNmtTDJK3rjJ_EPgw|=5wUF%4>O%eCNIq63fl~ +zjrjg+T}s=5>B&Dzyja8kHa#H#=@=^H&&VX7zB}mq0C5R`D26aCps(uOwohz30j3!K +zUG(6}_AduUvZnRQ72$j6wK;bU(7TJls#^ +zKc96Bz`ezK`n2dzDw#F5!a3mq_kt!(@Jp>Nk{<4_GAE01MzRfeKiyc9mnhKhiU8GZ +zU7qm4VLni&Aw|mTqcJnuDmzYkYKk(%8>B$usaGM8VY+OMs5?yNrH8xv^`GO9r}_75 +zHb!vw0P0m#a)C9cfUWUSl_>^2 +za(E!lyYQ63oWzl1qj7DYE5-Fk5G7!+RFLd?n>^$h9=-ola5R|zbLLb+^SFzrU)I^l +zg;BYd$Egr*G<$rBGli6DAK*NouET +zD|wARR1JC_wU*zdZ&0(*eZhT6E+OZ75;=_M+@M_=x1 +zo4@aokXJVM4hOLSN#GNt3|Fgo`xfQhjig5ia#uaqvoy@9Hx8lokB2 +zl`LRLZPO-eHHL`Py8-ZYl!hz8dA46*pSUU +zk9YT1M};{B{c@sevq%TbsBOzWtGK|)nc%R-2nmJ27%tUf)bH=H$y?w|hZI5)Qcdza +zC^})6DIVI+zCLDDgA&T1NZtR+7&(c|PGv>R$^}o7nR}45AgAy>Tx8Z5jpXcSW*S9S +z@Lt0ui-v#Wk~=2kAsmUqC8d;5O&ZybTnV-g<$h1F3Y&bcH~6n*-;;}B4~nA{xlB4z +zGKc5wL1D9WXeQRQ6ouG_f8!FI4-k-1P>A_>A?gb`*o`0cWXvacnHe-A9lqiliKhdl +z;khyPO<}fAl9*_EQjIo~+-VAhOhM*=G_8VSMJ)7#P5SWjOdc^Zy7^8u0 +z*)C(WL-K*meE+_oh&|S<6w{!I#)NixiL_p?5fsM;>otoBSq8}h`;vA<=zU69uLu>@ +zkMiEqL=NC4y)cQyIY;hcMb1&;73C9+MuW-Az9KWKyk__upZQ~%ZK0StJ1u$93cTSD +zD-k#*6Z$;Fd)Z+~$WdpOC5wC-#Jqt|d*~Q5jY{k$C%t!0)n|EpnvIxTPCieK35xQh +z(xujVy9^27e(ep9y=OF$9BeF +zG228AMeLw8>3@Tg=ssV*v+rX{@Lij@Id@)tDw^{%LTB;P +zy_2G{NZ;LUglR|7+#Mw32@=l{mwA+xf+PG(-@iTYXvhOvKW6|DrHVJYWfmle~t%x1%pIdVwJ$)S{D^K5};hPHeHP9yJ!N!}H1 +z=E?N*bq;(N>PFiB#55-A*z|HxxlynG_d;}cd +zcESBV+PlX#BdH`$X*>%6?}fEVRd3GB6tujlcCBr)%f=$vQKnWJ_o+ibQCbovYDAUfg};-B0emE0jN0T{q!CoADjtP>?qS`Wvelln +zT{+@trss~G{(Yo6C2suZ)jN%|x10dau2SIlA++I8YQ;?8Dh|pItmZqFzHk`v@ye5IA0#tBVK_GS*I`SISM`WUh>F**JpF8 +zT~OG8mXx#|UJTcAZ*jV>MS;J*SGxcR;55ypHQ5B#Op4WRow&KRH^*c~GG+)}+8B83 +zw9nsg1T@>Lol=&KVW=e$;=8oMxZ(8~pdhC5rz? 
+zT(a@ue{wPZp(X!Qv;>#Goqe=d-n;I~G38Av9cDUzae_b+7l2@e!nrI`6pXu7(5?D* +zF%OuTs0erIU*5W|3tb?)7$%SDQrW*XQ?@32ZDta-<(;#Bf~p~Xb3c?r!6AXhZb99X +zErcQZ(-!p%7f5_Umh7jF16UFeWY_&TDz&SA^0~@H%zW=~FeE +z77~r13$>RI$r^?qUVt=OZTbw*5}B=Zw5)=hn#yPTI$8p~F*8gPCyc+ccQaU00Si0d +zDCHlH1$88uZaFZ;Kzic*%?M1`FyJm}VyajPUZLA(7~8CHHqj-=Hgj-C7gOn>Jk$kc +z(Mc>satY%rNg=HNgFV1#6N4?Ufv1gTG^~x0hJ)-aiVg}m@VS7JlU4y~?=E{7LzK9% +zMFo3wzoQi3B{yE8*GFutdmI<#CXjT^XA$2K)|+*zO;1~e+v2v^1aLsI@O1v?D@1sE +zG*r2_+pIM;4r*`nvTAMYl2Vg&RsYhS!yE!JN=+t}gw?p;5U)IpmF?KVz-+~_&fiI* +zffO#tWh{p2@~k+=Ea@Ak)hqlajgYF<@8(BNhbzGnZM;F9+duZ2$y6ML7&vdP0byWL +z6`g6S8W2PzsX_~uk1D%<9a{Z3Muq>tW_xeDYJc@%9naga3;_Y3lqv#CI6%-Z_rG~( +z0CZ?*S8Q+dBVbue7O3bdP5>5Mi<&xhYWPcdCxMB~6QZ`aCfnlq#~>@GTO_4A-BU}z +z>$|~~8y54>!1_dm!aLIWnE^fjbbH;CEAY_6LKidV*%$lf!6?FJCv1h>=5U-idaTfS +z$o}nEG#ggC87#&{LDtjAl~9Q*;)b(u;4L6#1frCx2E*=YvlNOlh0w!;5yJ%3RmT{} +zZRaq|x|%o`yI`GFHA7O>3Z{ViiRe+X!*tIXxV)C`pGQXE7 +zkYNr?PDA9Ih9W-tWLvxtnk+Zpue==)8uQ|#C%I+d!$<$r9~VDFlUv_aexw#=Uix-h +zzUkqO&Oh4M*>nHUzQQkuw*!9;=yar|5RyBkSZ-F$1ZWB3GFTEGUmd=UhJfM<+ZDPO +zH_&lYQe+B-&|A&kK!ZC{R=NDU +z?M6ArOEuT-|<5 +zPRMnE+ID>mNN9o2#*-Tk3OvcrpKmJW9L2qT@JWvH9^ +zHQ+Wmd%>}B>51g4Zv=y1?_Wq62<~3|SOu^m`{pL9Vz +zxRo6F+7YJj9N^5Nx$A_uQ@r~}BaU0MOpCpu)ct#%U?>iHZFG_s0vxn_U?J_tNNJ;C +zfg%MD!iqm;g*h-vJG0Dw7$8RHQFyE<77$V62cIJc79#0|c#kNZsElG-lsvU)lnneg +zI%Nf&hOSv&!cV2BM6QDCAe9{X_-8N+M*q{5V$Hyc}k3KsDEkmG&_ +zG!n#1;~f-et0mN*JiiWSzdjHrjUAUc7Vy)Q#=w!p%o9)wezPJW^6&6&oWOm4EY^IK +zIfe?O@H`;qG-zJXWfo}+4>yhSh?t2lWyN)&sGD89zp)v8goI7wv@gzqo^1b@L+}m4 +zwcPHNYIj4$$YWFnI6alwm~z_1gR9PEB2!7TF|?fGg%S9hZQU_^<=$ +zG`usfmFMxV2xXpL;gku|L#c~o0X11`$w4564L$5 +zOQ|mWAi7I%ZMw(*rYJmJ>V5g}3%a}q$c#b?@A7g)k$Q?bHzOO(z0Bpk=uI&vQ{&8O +zf#3bP8fjxu2{xkY*cLo}4Wn44JL0k!bKEVXiPvB1XI_pjHD2JHxj_GiOKGXap|+Ls1y%s}Wyip)L!;%5)^vCZYZ&njkoEcMP( +z%S*&ROnN>WBpY3j$r +zThjeEl~FgZk`_zj$u`Nl&Iu1@vK*XXeR=6h@`Xs}TyxV~J61?Q?3b`NaYGX+C*9A` +zHFHYSPyD$5-O+5T(fF?ALM5kc@JhytS#C^8fw^9^g_4&{aq&Czq%robiL_*WwCoJe +z-BDB)&bGbzx+n^!o1xSkp<_q#>G$s=6ibgnCG>;r6;%r +zalHjYP9+md96b+y@Q7dbmm%j3&BPUHf-d#PdPdQlGJM-If}U6x_L39Lmt&{6 +zl$-Os9Xn8^deL{vJi^T44H;~Q06(%u8BVNJo}B#pc<9RautJ@JlS&d6V@Y38>`!9; +zoq=?2zgNlkG=5ZE(JE>Ea%sp8pBj8jJxbP^7jbC9AN}2*d7P;t=0De*@cwvQA1QU* +zCHAyZg^XL$d!=Nv-rU0Dw@r1D^4W2r86Gcj>^X5XW5w{d< +zpW544Jw9H&Wv=$;NbwHpoxetI0u=4kj=Z=S6WqfhTs0Yt#hKdLCl)Z)~LVYYUFZ}TJbDd?wHTbSIoPpch|!u{Kq(}QdD}^OsxC_ +zxAG?f2@(%B*YKg^fIJgr=x@4aQUv9CB@>?yf-4>l-*UwS1XuIarCQHeB +zzS5kIsyG{1yVY6nx%;-9bqzMt1E45NKR7$AO+o5}iPW!YR(=T*tJ9MbGcJ8m}s0?u14OAEmoH^5-TCtsa +zSzX>mE`U*WyGp0IW!Kq3*BuzQ3tF8Av|Q+PDaax$6ypv9*;yrC{_!_L!Cif1b>*1$ +z72puZH1awc0RxPx>kty;*dF7C9-1Z5d9jD0(i5g~#b4y!Yp5;8%<6?q_fT4i42iz* +z!Ct+{-ZU*@j6ol|;Yxx8U{}4$w*qg(+&FvT##wKBU}&OGfG-@p_~E|8C-~*sNr!rx +zL*t;sRO&!i{XqM(fi?pNf&0Zy%fb5#_8nOReLo#~vK$_0IgI_`C*S-p*QY-Cce&2l +z`G2G(hlY(W4x9Yzbh6td*>guE8}v1Cs4bn)S9odCCfnpjwR!K4MmEU^`+-^dU5SgIU29 +z^nT(1Su0`7*CAuI(?;msvK)}Eeb<%vdJvAmP`);}mxb!YRDK(>)v_EGB%TtcJbx`ufHFJ+rrxLjvaw +z>uT_Kz%XzGvL5-U9$5tY48JpNc?h|#Lr2;Wt#YoWqsIF{jXAVV$14b$^L2}HXIIhs +za1CGQs$aNmvZp&S_Yt@O>G!;<;7Km3y{UTpqR<37IPmJHiZ%)&6&VqRm2HQPf7vvL +zU5tc{U9oXB1Ho2{2~$)ey3x$Ex+4*VOu;fXWT}9T64=B_K=_(RM7gnGd$RH*1qGn> +zyTM?Yip5aCCQ|+6G#q(A5UC^aQbmN%Ll7gNz|{FNCTtY65e2r%=iQe~;debbNJO9d +zxk2sfXu}RWnHYf>Ve9Rg`%fy5%IbpBwseo(-zO#&yiVhFm4-~qO4Df{44$!pek)GN +z@^d`X4RmBF4@ImLkk7&1j+|kEy+H#i1~x{@8_W;LRCG!jIKQZxzruoH!R!>33BBRR +z;aVsQ8RG2GAtT*71YRh&fVqHRR>NR-)~z$EMr>HZYi+LvqGh0b`N!5E3BNEKp@6PQ +zm+$9q!y?tx(l#z}AdfYD6jV{zAJO;Y?McLqd46gzEGcRiy~-w9KmZ$HPVPy|Gfa(;U1M8b%9;g6Z^a*I=uNP@r%BRQ!u0**KY +zjAn)l7+WX+G?5ibx}TkTZVX}|2B#3705sW0+C?R9K`IcBK8j5{XSMIVr!S=wI*DOo 
+zt&Eb#A*RY@Lbe57fY?nBpEM9?yZy{VS>W5X8aQytKygrbb>}WuOcczaL(-EY*mkm7 +zQsUec6vt2Jn_&|?HC{t4t-_ReL>3-rr1D0{bvFIS+T@Sj0hc_LKHPHqH+rK@gK@XLaxGp=9bX3QCZ7H^J6`~6ljcd_ +zD572Y(scZC)4PM8sBtk4FYa7H`eC8^A9XgYyDmWPEn)8IAR^8v2=9TUs_AfAq*KLf +z0qP{cPXN)0#7a9xYH>|;0=EYyCLp75ou9(}ps5b5$>IiH`_ww8|KI$iCzgr+ +zfAEv_8ee!bmNWRm|KcY%4IlbT9pC|?lfX58V)BQd5DJM(|H4mh9Im}7^K04#xBBFI +zTP;NDu8a2a_eXgFeoE&E^JmI+AH|&X&pur}f1$+=oDA$$6b>HbM!!fXk?RdtLKv!~@l2NmQ^mIykYSA5@qc5)v-U=3D +zy;6COWv}#-e|yA;44#`StUg@+01FeT2#?QDn#4o=z;psePrbSJ)MIAZWqoNMcJuI& +zkN#k1hxeaH2)I@$Na9S(AAa)CQne*pO1)#2ni#|VE=i5<1ulh~| +zRH+Alex*uydRH}qzWhrTpcqdLr6B4Os(%g5297^!TRGc%<-?_WI&V)Ttd_MLdS$YB +zM>dcMZkCRKsLz5?XTaaRMA;w2)M-=45-3kDUxHFKg2a<*0{gsvzs8odE`GWA(bxL! +zm)t$SS_f{gKFQ7b@$?Z{w6E>cx6xBHSjB8R`rKjJP3NZ2r8vl|J-Od!E>C`$8eLvG +zn=2w`9{xV615aZG2rqby&UpM%_k89QD&6CW*6WuST +zSRXy?^94ryD8tw(P}#;`r^J7~0KT^x+(?FPAiyZ}@XwCH=V$1Q +z6HJ>NY||5AZ%Q-edVBU#u4?%B;yswv%x_DS7^LKJn=q?U@>(i~egQbzsTH8@`GIHi +zSYJhm$Y8T@(;gPrnqjcwi^Z_H%j9^Tr!_Yi*6V*momH+A)~~}7S4Z98`jHh-R7#W% +zH_8(4``=nQILH_IXwr%=krll`Ju=0N4$X@mEsn~cjW8W$PN+wcfQ0jCAWwtt!Uh$O +z(UPsho>&K(ETRC}$-){xfjS6dG8JNvIMMd~FZSO1sp-D&w@o93jvyV3jV3BKIvPMJ +zA__LT1W|ekJ)xO|kkAP=p-B*>E1^heO6XO=LK9T5&@Zo8smlK9eLrQN*>ldBd7iyz +z_RO6h_zyC}XXW)?%Om-eJMG@Q2mw#5(+(2AM;Y0Mr>_O|+ETZALWgXru<`gsw`iDI +zDm*^O7MFU_-LC`C=K}c>;7~_2Vp)ay$(HIhAJR;u!#s%w%#fQCK?3EzbUfiF9$1b9 +z_w#5>ZMx-J#5G#-Jw}r9HY!+%KvRnojt1hqe`Y8{Cd9+|;2RT^QbxwOQsN{M!o~-+ +zbHWdDGbc*$_e#Omi>YOCG@sSvygrDJRO~n9bW<}xIqtcBH;fsO9}Q& +za$1ZWMCQbX@#oHmkUHBPbuxFxlypHlquD$v+AhY|EDMrTu+amQWcqTC^{4#iCnGs-^!&BP +zeC7mXWF+!we7xRz%v0&a7k1PTLcqbMU=5kX$oSMUnS#p6EYp4hc!QqH$r4$OcT#0` +z62KTSMv!VqiEXr#C+xdz%%!GyT3B%Wc-E)>tS^&!*vX`4Ifa&Z)=1c${e;4NW#kS{ +zyt*3b7(2(vJQI&C;`I?Uo8n*LqZGmkyJQo0+cO3E19_^238>q@O3CI+{smgaYU^3& +zb^S(dLRTaUpE3P>7PEH}()8@{vUsXZ1#v1qo^Pq(TB(R!c_x9InPr|0^MGaa6O8TC +z&x8k==4RcrDOAk8bNOt+;#>yiH#|v+T`knwX0Xz9SnoYqH%moc*d`e82L{`x6KU}T +zS!QuuRjT3)Pay9sRB3UJHO_Ho~xm#YvOyE +zDG}oVp0bG^{TV;P11LFJ*4rDnFL;7>Jp1Ti=vr=tn8Tfpfs9Y#xZZ?RcDWxBotAQ1 +z;w16@lC=Nf#L``xWmj~*qfX-U3>Vn;48e_xyF2vi?=b|s!*OGw^{!#% +z6D1(Bnx(M3jUaFPaPLi=pInZ)T;e56@;Al<)A54n$~5X`%FWecR8tJI +z@t%uq!Du7-7%xMaUTfqMW`O2{Z@#Zm9k47EWD(I|Dxq!zy*uK4me_bItR~H_{#OI=_kmDz^ULvOL5JqG +zh1T!zYWd|BDH`?Ec3JC4Z)+^0W_T*=d4%sj;7O;0MDLt{BM(`~L-{ndHn#-Nwb0x* +zgR&Oy9J8xgWwgxbvKo&PBnEX +zfJaX0dmL%)^t{@6D6-St&HJ87*U88(Yy+~mr0aC6ho!oAeUF#VpZ&=N9-_kp6#ZS| +zN38Re5%5l+*Y!hC+~kI6!*6(kfWrdRd!cYxh)?gn?A~xetOxgcXg(IW!GW0Y4{eY1 +zn!#Z)#6Cz*Z)_5F2*XcI>Wk6uM^NDm`M#q)eY)8$1@e9ARoH(=C>IU?iBR5WU0Zs1 +zt?SI-z|Fz#*ulpioto7hTelqAe4N^c96AMux~m)>Zw>bQ4E07jJ`r^6{=-kcoN=!G +zzmK0h`4>O=7eDzw!cVH3B{Ic{zx5>|Iw^EGOS9~0V#SP7PAVB@P7qU-( +zb@~`ghf#C-pdkzcZZO7TVhxn{&VGGyA#ntQy4fg3k^N*_iG=UCI|JU=i8+8$O4yrYPdCDcyx?@^w$TGs8bZIp+S$m;ZvcY`UjBXj_M&P#LG42i(`xfnuxAVPj$VqNx3s6=4_I +zh%;*m3h|<`pRSkb?*o)4UzbPux5+ca>d*8rGifTUYUaSXmmvNby*tZvGz2x#ZUe?q +zgvNGvs~TO8e|+&YvSFfoWcO}4%(tC;>-)No8u?Mv){MjpwGmoIpC~EF*lMcnX+8J@ +za7meAk9r?ecx5CtATI`p?6f;svxJm+WVjgWVeR)Kls70Fxy#D*tkDxdH*#hudVlI^ +zk2}e?S0;5GK~zm-rBIp~>BYtVV_$ME*uEWCmiS7{LXI$8W#<=K|Y-E5ycPz+z@ckrHUY`UpD +z943p4R|8?~!nq)X{iiGW0LeJ)f(y$Io>Az-5`Aa7Jg60R-M3sM{Y +zR~KgD_uLsKl7qnLZ(? 
+z`%QM9!W`*fSlLTAW!JZ#Zc|~%u9pJB6P*Vk7>KVm7O9Wx>UAUvW!|fT?dK6KF7ZIt +zR$!d0hDvQV7Bf$mxVWBz=Jg|2(kSJzRcH)U7^h*}pP^C(?7Q~;I +zJqQy}F$KOA{L*ny5Q52Y(;A$78vdh>C3OFP#ZUI5{v7UX1D89d|He-mIRA;CH2nnl +z$sp)|%}=KPAN(Zw6Cu$e_J6XUpv4r*K!={X5dU232=i_YV-l`Nul?aC)f)U@y|wmV +zd{nZs{_Ft|v_l%slObYeg?Fv-SEA2QO(WsQf&ED(rZeu?IjkFt2>_Okgk$hA!#$=7|C*>Purbce`F!DZv} +zix&&+pQc=GuNYU4F>l4f_VQ>F>d#2hqBD`_%1uFKRk1QdAcY((X~$Ok%Afs--w)OE +zB) +zw-v!s;R+x(rQiCKcD0k>IPXol8HH8theC$}!TYwRbZ9?R)4`u_yX%RFi!_fM@%!e( +zA-+F*svGp}^*3)*@ch~BAF8%i-@Wx*X6?elb~H!-b(dB!xoaFTIZIEfD|QlYU(iLC +zfI@D~~2(A?`a+*qGs~4(}NK>xXA5Fhx1F+57&@_tF-H1>{Uq +zpCOU2%Jiqi$4|}T(r?##!=m0ZPMtyLV?x7APl{&xZ#-G{gXy2c@EfqXZ~olya7B)B +zHD?WWee(=*`s}Q=HEZb{(om}S=|*YG7dhciLkAvzeIcUVWXK9FjzLS`W$16WIuu6z +zRegJ_I*dGb?Br%>DsiWQHffOd4$xRX-pofdf8d<|@r?2I&EF=WkeTpgPWUnq&l!o{ +zwDC=t_uU@%6EO{KMuHnt+`v}VKGmn5}r52)Q-~x%o4uLB^;TEucbxlxWo3_LjRc?7L6P)!BxbC +zhI#}um?8b3z#E+S^i^6QjgN&VjA-z&rTu5LlHO7iypTeR%H%Fa(2E}871OZtRoW=t +zuR%Jv5r__J+ybO02arj@w&9RCnma>~tsGX{NBu?QyFmjzYrIutLouDe=S&0*X(uj? +zM;T~Ec53_b_0kPEsn+AE^KrNl=>#*Qzr(V!gR3MOLHZ^*=nc&3bp<@=Z=J0YUorDLL018;5j$1lyt +zKe7wxLF7Z|%vq_JB)j~=HF`0U^4B`SRSOk10t1eV=82H~%7g*w93{^bJG*?Z_E}q5D#*0CX#iz>){Bu|aQg`9&Nm%>H>uQ0f15~Yr-^Wy;{c#3X +z5kAuQWEe!b4Z@l5qFwfpf9C|G*p+_t3`S|+TUfu7Csk~R5IVLUjytgt$(I{|7@#H( +z_=~R<_~iO7%plBB61+aXrmK+`%Wj{PDm>X(YQib8mbxwDp4IJMn%93%l$l>U4|C^F +zxg{G=fl3+8$!5xgWU2Wdi>JKOgaV7Plu_|V6Ud@)yx2xk?SLPPSt6rVrKv)E5T3~9 +zFMPIy`)XGil~CD~6MeU$jC;0Xxv7#?3f|4|1y*9i;u6U!e5whzpH8H=y~=qh8!(nY +zg)ob0r5QCj)sN#V3`#4ydn**m@^;wgJ4BoT +zYRC#auTa<&cQ+TOC2(g)9!={NJ-xg*N2+`)H0O&<)fczgRGEiJ9oFu=0OWxO`zMoM +z!I8ac!e?foC(wL*iS?4Em4%b_1vzzF4Rzh)l_snHxJ|-4ZH`j;!_OrT7Rou|p6Qu( +zmDAYt&;$N>ffVZs5hAW8Kv&|CR_PP9;I~Ur9P{+j(mWN_d#`o<+B_fLGvnOYY;xPo +zD3;^UR5NB@1zd{1&l-%H)P&}TLyYAlqQ;>YM{1(-Zoigk*{RiX!IoGw6-3y`u8t_( +z6W>fLWg{5(6%gc@fxIu1t)pjKUn)P^(!A}y?fwW}D)L67sfiPD1{?&GiyP+x;0FSa +z3{r)3A03}!%Wzr?)*nv|GV1ffPi#`}3;43i+B60q<}@ck`SX7B7JAr%w?qe!Ee2II +z0mP8!Xls;P`-shyxh-;3FSUVJrV{ +z-om|c9)9!Ek$>`*<(p26fFZ@vadgY!O~{jutS65jKADzxe53C)tN!%8pyS6R$N7Ax +z#nz|ISDwzbJ~b6}{OgJkZ@+U**6>E_@YeM3udQK_!U)7*1m-jXj~YQ1jPO4i5qv!& +z{BuN9;n@y@XA(}&B%_{57d(@F^i2NsGsU0J&g_}+@i)j +z3dX!1jrqJD^ZPlLDkciry$(Y`LE9j85b*g1{ssmiw?W$|c{4VvJq9VR=GezB>ZFP8 +zJ)3VrUrljuD%vAX3djE1dxH!O*MWTVx{@b(+CfXx{dJ|o +zg_szAg|qc#&x*}5)SL$D%U_gQ6`w6Q`>mf`Uau)nuK=iwdW~ +zhWk?--1~t73(kYrE%>iXuLn8RZ`+7a0mXBT?AgvZ@dMX}8f)J7()U{2KG#(Hagce; +z`^`{O-TW}iI8o8Exqk6Q*`>;BPnsVtkF#9|Z(FuB{591WwEX5t3+L~*+!%pf=UW>; +zzwgU9;QX|;>FfNzfXn}8;1Y3A=v7xcMKpfs`rqmfWiH*Kb4i2eC9k~XQenChpfh4; +zfN_c(ib5pqoQZ7VBBY5$=3>|IRvo%R!M`yJ1{VWWhP8L`Pm?=h_Ubljn4*Aj%GaTz +zB6~aJ6?xVmInE6iNW(s>$bJ#IL=C|wW-kN{QNYDBh41H83)0Rj3%t+I%-s#XYFJ3s +zSnhdk<#0jHka^JvYU_Q!;fdnbNaAkhIXMKp1dpwMh4+8-RL)O&`woH+U@z@E_M_86 +z-{3`FeBNO=aU`XVCEd}kD=MFYe`l5niruTz*jc4bb%YpxWH3(FA+)SWhAIQK8D@u7 +zt6ZBF;}fmO0;sB#6@B0i;pksnVAJw($Mg8&pSzo77rIUl($sl|js2FF1v6cD`aV$1?vqkY8?%@@%JL}77>pF=bv%tT4B +z6|2;1F4qam+C^VakAM_-GoVNI(4G8Z$7JBuu#(1Z$aeV6Sn+QoH13Q1-6g#T^c;=I +zW-!GKKkJlhM`q)#@4jAV;RKXE4G5#S*CZ{pXlUUy8{dU9{JnS`F$@ObfS0cpHsq6% +zPfPZCKeNAT#OT^AS4=Z*x8i3wSc+&U_V_GqO<$w$Vb&3(T;I%oI0E#S>6Pt +zI1{HSDgfu8_1T@jp5WqL)o?*vlf@b%uK +zBn9#eC?)yDEk_5Mik}$g#;CD~G!D80rn+rBr2~S--M9+!OCuhQtBFxm!HAyZRUt3C +zh-k9s^zM1Y$g)N4WhTgbI|xs{6#Nax_L1a^LO=(w7)0C98x8sl2|+YOt`%RE6x>ac +znDD)0cgfKx4-Er39*De5?9d&n8ES2+fvL`5-ap0P7t{bGv}{A*9_n&#(=+I@840=v +zJaW=45tb?1p?=6EK`svC8f4~;V%sC-7O)~BU-LO0M5xmA9vBK2BP?$fe^wpPqp*xb +zD{ygU36+X6Xv)q&H02 +z@+2@<=+9tf_REjPKFdRaiNL`R+y*%Kc@R`;F}+~F+W7Lx1AiIZ>t!#Sme=1ug_X)( +zTTWrLe$4vhKRh`7ZBGop9t$cnVHi_EM?+7>8Sd1z0y|E-?EI}q5ybq~qX0Rc-zybC 
+z5CB|6Q+PW~(q{M_m|S;I&(1wVE-2{x4)jT$jLEav9eViAVPs9hFi%jt#g?lk{FyAb +z@=n+pj0ECOqMDAb!pHXZqEA9)@zd`S{!9r>1Dl>GIHNan2cl4BBp68t?>OBp4~sKI +z{F*_4BO#({Rz!%BAqB<4LVg36@gcY4#h`7z!%WKW!HVuUVTT&CCueGCUbBrL#!n{| +zAfnv))UOXmoe$2nW+iBdZ#*O2?h=r&^whdQM3Sk;i~4=$&MI#JWgf$l6H)W}dtbeX +zhJPzFH*nY4B|91gARGB7Ub+{I;D@zORE;M6J~ev!@G2PXh6bGiwM!jAV_q^Zrh+`| +z1dcJmx~m +z{QR}C`tN6fpbxn{AY6I&J$f6!CBWt-y;x5&N2Ld8!i^Q~LW8cI`5o+uxe}T4W-=5% +z(5?``D3pUq(t1XWqZ5+l6kaPjhxeKO`WAEn{Lx~*0ln{6u=*Nw+4xkqM1`MkVvNDZ +zQX+KnzCuWOrM~Ds<<0)r559owTbIySaJ&2Y1X`9{7j6!HZ&OV7l9r#f6AZGRQ+of(H1d^ +zIJsUu<@9a;a7!o92|}3_AtXUtRw_8M;u>iJV@ede=Fbb<`)0w6vGw7_<52KW)P{Pm +zL2t-#WZdT7Z+NNG+l!0Q{2oJjD+F%XUz<)pm)+ld@-F@PP3(N=CtAdx8B5?HV8&9! +z8)o>SG2yQf0S8$EH>3oYw4!9Kq7Kpp`K0}krNFqy_mw2+U4ySRhQyD6tWZhsvIWlr +zZ%8KLvT5j-)o3J_#LpmqFujEjBmJ!j`Hw{l2>E@E^nN~cDm40vlwbuXTBtEp89_d+ +zMLNx>*t!F4aO)y}+qyk4l +z#NWx&Q4{}NX0f9-OGDI=IKf_;9U}IIduV7J#i`GKk``95N}1W_#Kvp; +znSiJlO7YcyNK78!9|1@V4j<15rkRD}G-6+r#*T!AENjKzYQ$ZWil5R9McGmWC!*6l +zB5Dy)$sXZy(!?fC;)m6!`x7xRTbv0d<^)dvw@bwe+*j&H)sDLF9?NMAY+49Jvl3El +zZ@sfgt~Cp-XA(@Msmr~TER~oN8^0TfB-gdL3pPn2VM&07bBiPXg%vX#2Z&*kU#tax +zGL2TZxwSGu`z6ZfZ9^-uiM>E0e4r(mlqTn_#&#lutJf&0oD{NjXepaCJMO1y8gxYT +z5^&qpLlcHHrY?`CnyjW`khdZei93#pLZSlyGkJMK_fxS;jId3)X&WMhNKCQG9FazrFbO(~fr2OsxIgX)DsXBp +zWuh_j!#D&R?`JrlwPYJl7Db-b3dR!=$+(!`{Dp^F%nlzdqq!gxf#>h{@vIBld_m=@ +z&lv=@#X$8*8i$s4&6BQRmwZzvfaJkQ^Ym5J&O!G^{Cn(JKh95YlD1bXckD-ot699$ +zY77RMQ-^0Zlx2OVrF>Y;iY;RlBcqHx)9O`0&s0MGqjU@k8D(bYbFvGSlM0%mjn;z8 +zwX%b^0lAG%#4A)p;v{q46W3)I|HM2x3zfUmGyLsZ@ZU|r!;>&f0QdpH&wY|MQJ%Qm +zvo8%n3kPZ~(FV8|P^XNUcQQJ8o;DlsH?I=D13x}czcm>ZgX?e6{q-gP>jpF^K +zL=H2za$WqQDkuk+W5>BOGJYq@Ebf!%UCdgJyiKu7Sn;8-Oj$skF`oQ}KpRy-Y+sE( +zW1nm`5OCH$;w)WYCy2Rs61ssEfG$M=Co{H0Y9}Gh*drGsT_~@7&&BkfiTk~nZ9LuwDj5R(_`5yP$mM{zjf|^Zi50RlQAgPg%hZz_o0Zs`o7ZA`#JPUXopL +zk3S(?7Jb`~A?Rg)>k^wG)`}}Zz;)Q7DFxa#J2N7T7my0as&IhfjNK5 +zU3qKbI(>XkE7HjPHEXi9;w{nG=BVolp>hPe#es)=6O>zpg9%e~5 +zgy=OKo(LpjYeID+yTg)v$}48=@7v8+94Nno3ab#3^E1K1%oB^6)f(b#Qz9JLU>k;H +ze6+@3wk)?Oi$T;v5fqO>OIil*emxP(NvJaq{gfE$tRh&*=8x8TeCCTRgsb!Nz| +zwgh4(*lp3;-8$#(yw!Pe+Vgsq+f~7CyDc|QpRQ|y-Dg*NJoSU1T$Bwq2u2sS4sc$- +z!uMDd?nb-;90Hy0xdGDT7ue3ev9rV_ZVS7sp)V;CYy6{6E7lGiAal%f_;k;4 +zWz?}V-?7`#u_JP*HEXEr;m}a4!&676XX;O0_&AQXI*kiDj!rvF4Ly0?`ou@n`Rf(u +zx{#*}`A?TxpRPeL&?>&)h|;`XjtS)1>sA79bf +z7kl~np!#l=cL({zCTeJMnu1?>y+RQ#F>+J3qF}yP&(Uyc)btErp9XD +z-Udlo1=Yg!0?l_N*?i(d)J+?cYxYIfBnunZJY7>#p7!+trKs=TSQ7o)5L5G-IBT*{ +zb?g&fk^;4eQMf#EZS(E5xE)3+9oHdLp|-l)Mi8%=p9h4D8f)-hvbIMO6`Hg-x#owM%A+u(t-j+i$1u_F5pT|4(y{qxS@-;3F +z3s_7*#VZ!n3BiHkZ3j+6| +zL?J>UALgJttd7Y#4#Fs0Q%C}LyU|E}7I_XyJ5ibiKB3(qv_(|vqo$E_o?XnCw#R^8 +zvX5NSpUD)KqY5OjDKH8V^h$c+p_zv8Ib^#Sw(JaKADUm-j3|WlAn!#2$jPr#@e}s6 +zJQ9mgTHy-&ib9o!+RqbYzL%LwMW +zX;5%s>4(gVhrYP`K(6-9RhddI-yvNqK8+i@p=WLmK!8`c;?{1-jS(7X&f@LH9`3F+ +zpwRX=nZ1e~0J5=ZHZed}Q9rjgN9jtT@WKJU)uq8?K+JJ5?818(zD8X+9 +z*|l?o9C=1Q7QE8fw%uJrwAuRb>!|I(_pE@d^-8cZw(0RD+?VXHa!B>aoX57vF6LLD +zlu>B64HCN+6kQ!Thg|@2KB8TDf2q%%KcuLa|R-skFw +z5+~S@o5|N9O2B$UV+;X#>9eSMd~JwKXWyHh`Ka6SYMA&dP|=pYq*7=%(UpCyplt=F +zeK&WKDg3p>^`LJ+^(69Eblu74J}}j9L{y6+Ki>+^8?3(mZ1P%N&0G2Tn_8QLq31); +zeLp{{T-JNJ^`JiA0p~-En0j6rUDx!}XHIK4dRYPNI;j;fI!& +z%x&SG&X4i|#0!QqJ5L?Ed&wYH?$+do^79>SHC2HmamN7ye!WyIdM@5y=Jm6lqYYoZ +zDWNv)ZpM?CJ#QSoWcU-NY(Iro=s39-wdv0BDTicjPvO*+_2%^$wb{MC-ZiImL{D!Y +z6_|eP+UV^#;Q6U~e+3&%h$(`E+0=B53nKEqZcVy=`kXg^$UQjb^~%-i+Pvi|ADzyk +z72C&a*)M*&so#CIE92y`hX;Krjjn__D?gaDa8w5JB}PEw6Q|)w>idUvTk{v)wkj5Ja~+-rJ+WyTD$wLr8XsQm8!uSZHsQ{!55~&&iK%>N8}VKLJ5p9|_DhxI +z$?cwi)c4OEWH&T^Ji3=b5B%(J2dx)rSpRu0dROK4^bn)=r1s;;a{}Z~8>2{===qaJ 
+zL{dNzdz2z5j8N@t#6b}8U{8pCR`9>Q17qe3G67@G2i_SZjzFCg*A$_RIEmJ +zShbti)>Q(9L6My!Lgrx}%6vGqLJmIxXr{1 +z+D6BeVKI2O=c}F>y}p6kZu&@^$OJy0LoPR=_?z7PHAbLKxcHjTL`p>XCtO_6r%!?5 +zl_RwK4I!3gE?+rr)+01oO)niAN)0DIPbyx+E%L%ua)D+%e|Ef`X_DT;t?~(%r)8ld +z+AeqL#wU*m4Dhh2jf5tqOS6{SLMTbwEkQszNg)pJFc$CZ7M#~hRSmr*ZIi5B5>l!X +z^uo+Vtt?*84F(s5UYAe5hdxp%Bg$c*&8J( +zvU3T9c1-BH!dX%23yE*E +z`ATF`o$W~9@mbL9EIni%v?uGJQq~!n9HpM@J)zkSN_;6Jrl$-cFfK +zjx+U2z*N=iEgp<4kGQ)Wh5|l$crr~E&g?JCCo{Zw+Z^|&i!LT;B5XnY0U|O6q`ipk%SkGn%y?ku_1(@nO2UZ}EI5Q4aETb`-dlNYiU?|Lw5!VLyx +zg;#}%Uvx*|*|~aSZof(kZsgqg`jJ?$ejBcaHJZ3}uPjeU?Jk6ud_^-2!zna4oA3)6 +zfL_9$R&$RS4?Dd(@FK?h1Sf4Bi90$F+o*ceT+Q7z!ROp7mqbFU`$mlES;m2 +zLH<&^C9J5f+eKcm$q|2p5tP2y^@GG*5v9Bf+a>thDmWyoOxPsryGnVDU9RZ-UBj?4 +zKCHZmey*?D42dONARbru{>2~AQC$5&Nh5KcXAU|MGi{%o_qt~)AE*S(j +z={)Rog6o!4_Q56kC)q4n3+DqHH)OUet_$e7BOT~-vXqSqZ1xEB1m=d7i`_{!-<25w +z7c3l3v>)aQPLA65cRQ8ucd{0+A3gK1{G*eNfTMk$z17si!V?dj9qg}8J#4ztP}2SI +zdLrD*+rDS?;qdo{Y_8+=yoRxxAmHWwcmz6kMR@VM#BmTvn}s+hib~FEOlWCLnrckh +zY^2LIrOP#?o(E>h(Oj0-T+z~8HP!rJvzaZ| +zQmfZe@6gf^(bAaL(%jP0I@R)cvxO_y+M(Cl<Gl +zn?u`=h_?%TxLVM#3USU1BMyk7_r_npdaa#Q|dlt1hS} +zW|k;-8IUI`eV6S@kv4jXBSTLC$ys@!%dIJh{dYm#`weYH&vzhZuzc``jn=Sz`+*|L +zK6TDeAXE_p;@im)CHv|KDt}=M?60b;9mH_djZl$|0f~EFK7~TX4OeEVLKFBA(7sNX +z?}@?As=5>jccE^z|1F(c?34_`Bm4MzC{O;H|D1CtCL^vU>Y9 +zS56se_@MUV1j+EQu*7q&yvOQa+JlM{bma4XVKLZFKJ;w+aBA404qd;?0iaso+{94k +zh|TMDKuqcaJ!veT#|PK7YTfwjkTLLVm5>k^IGCFWA}uw(eS98lScO@ETycc7^SY(p +zV)!rD8~Vsu`W!X=_Dtka*TF?(Ui(1{JQ}jv&G}`Fv_s1EY9@%M{`zab8Mkxh@pCjq +zZhzmYTl01YjD&EYD%?vJ=+dV1=hJAixJHp;+n&?NJtf?Q`<)WfxZdB|Pl@9X$UO&6 +z?>osmaO7Kuz%E$P{8<)MP@QX|bh?P&pYc#myTkj@#V#>@ZR6?t#t;XVRP{D;HqP=c +zo0j5$kRanyuiaOV#gl> +zxXzVO#1y}moq?#yci89v8Qm0>UX~$94#g%{<3wR{KGmt7#t_({c9^pQ5#dMQvnQs+ +z%;eXS=;ZtN`*xEWFuxKM3`aHVOqy`BhP_iH49>)#yA%EWu)UM{S|SSdXbd$gc0Z{ +z=00d5Q=}ok7;(`GhB{uv9|}VJDvOPGU>70X^_)k3G^Xfi@2t>4g7kJ=6+Qu*-Jz+W +z+A~Iaxx>uT^_oJNt+=KrzttYZiG?$5iny3v(_A>8DBn&pi+Y$D!$urwH+aY96MP8* +z?kbIhG|+&u)U}LxCQg5v1T|41i?JrS0+)VAa9{nS>hj04@w@8s56{MLPpRbt;7(+E +zPIt9wB>6#bsm1Fh`_<-&Cl5k7gReh@ueQtxRpVQXp`rgAK8yho+y(}im;5_|>%7qn +zR9!;+qgDr<*Az+Mbs={2rUiDBe~sIx!Y0oq&Oi^_whKvWK!0t{{4&vQ6Mw*tJskN~ +z&>P>jcCX`)=``uYKTW5x!k0l(VYabJ?&n-(oWauYzM7P@;dq$^{nOl~pZSONl)GySO}qdy +z;^XhDFVVVBG*`ijsW6FUrj|<=^TJ=Mm_bW)o-AD*#O^qZ{9X>+nOKP%!tXzQSmJC+ +zLG|U1aV1aiTk#+lmhHzgtlE!9EBIiGv10t(?==p8d0J_;OsX(@DoUR$*|c%T$3uU> +z{GRxFt>#Y79{SF9MngRr4lgp$WVPp(%OR$DFVsL?f-oNCR4^J2YxKFaBt|szc7Y$m +z(6pM-M5^A_tnhFQ9N0i4IBdO>9&YTuP(;$lY`sT^;RVhc1|;V>z8~3^CW>Q3zhn#i +z9!x;`FP#6Hn32z?9!BT`vb31Y*16-#gdM>Tg0#2f7nDx{a|gq3*%xLP%*M65#pqv4 +zPRYY;nS_^?z)$}((@UcBO{Z9oL-H=Pe{|x(J5CkF-a=sSm>vOxz1aK~F|TJ$4(pA7 +z#I2^LI{tle6#?1!gsVzb;K`ig6F~lrQ(lw*EPaYkUOF|b7WzwqgoAuEl)~(&+V%ZXS>)G(c_4zCwd-fe*GSO2YMp)u +zHo=hPyte#z%gI)7oHCCKX;kLtJ3j(FwUWo1QjTEH=d0vLJgZEqwTAHGkD@1tq(ycEe?D|l!=8m$si%8ee8(3euK#&?dZCJMuYB4sI%oG5c% +zoYZ^@FnPcMlLrj-hex!Yb~4HgkDQ^uFiVL7E^x|8vKkbNGSV+l0)H7@)+{-G8=oMO +z6IHbqIcE}u*!`h(ZgfNZX +zRM~|PgdKhyAJ>75oU)CiNYM}J&>zqcBO_2tps#BMhRg^(m*O+gNRVSj0h0-LRHy+V +z&P+R72m!eyO5?f1F84-a%Spn0c%S&>V@TrqMAlWAG`mGw9}NNIUtIuY2T`Dq&Uk|- +z1T!;+rI}Z?m~R&nhQcC$+NKw;GKX!luu_?ZYw45o@v?nc`sF#)N!;U{023nO-)9bP +z3#1Dwkr8$Y-sKq+3)zNi8I<|VvVM9L0&-p>>n-5funfx@R!*xSmNL +zkQ-^>kv7$sGc{Q(WmCASntOMhdaIn&hzeEbC-jRJQf%^RWvN3pagjOUd0H&3gn}|A +z>xB7Tv#=nH8c}PlD2Y)d9hTqUlpkyd{>llm$W0d|P>giLzp7>=mlIVslGR>?IffGs +z4diGf}%;z-3YQT)@ZH89Snep-^T1;kX$w~f-DrF>9 +zyJFJ)Hbq9TQY}xmkBPR*c|Kk$`|7UcID~Fbr>aFgYYH)uf!2K#|7r`qhYGkkdGBUI +zLdQ}WS2kGaRh;Il%ADrn&W#7=Qq@HRmCK8j2Tkw2Evv9esNSgsk(*4@D@wXR6h6j$ 
+zKwi4nHd*#owNmVCao9Y2IlkD^rl^`0^U^C?A~9F;KvW7czGpQlX_3-zAM{CukEbHR +zEQ@zSM>G>sG`0yjl^gf<7b=q1?k+W#X(W`0TdXGVk-Ml(?cDLdTZ&1ro}o20^zhw>UtGbx4_&w +z8E9~FSx@HeMO^7^~>cAbEcHsimXUgcH~l_R(zwSR>Q%8V!MI* +zNUz4@t0_h0x3(Cty<7%~$9XQrajmHCwdeHH?#s#D9ZqOS)8g>QRdJ9)Rh2*>(k(+><5e+aN-53`Z^AzxccvU@Du6S=(%?m5CM73ygiYm{xC|MBX +z^CB&_wX#w1kAjeoP9uV(y#sr7z!yiLYYd6N-$Mq-US9H;L-Pl@IBm}a>81>fH_2X2QyRvAncgUh#>s~CVfpN>!5C;qy95HQsXL4KzvfT6TiA)EJ +zO-IRE1S2+*(dxxNwL4DRbhw6h=rTGOVWhX^buSh=5c<_J^ErK1U5L;AUhd +zXQIUS(+|HKCFp4lpKm7K-3?uT&vkhc{jAk}5=Z>bwR#uAyY0?-m6t#+JNo_gqsvyl +z2gvK*9P&NN^1;gY*hl(1Mf$U{d%k7+s$qScR=NP;#+`;9Uwz;6Rs48IuW!pD7q_vf +zZAqL;$#wO)-#Fzi<|anNBizLGF79UZn5ze|KS$lwgWF#)_OzyD6;`hRV<@c(irf>Rg|G#D@YS4{U`*yX>1UGCpM{#(0Y +zwC|mn`0km`7}@Z+OT+cTIl!)A7msLokP&y0C?ui}iiP@}CWFNf&=W;bD^|};2g@;s=X63c#(?~IpJ6f7$!9GHAd_>3iXIn+Sh5dvt`z?aw7DhhN(z_Z6_lz +zhe-B;8J@mAth*omZ1tTjVCI0LkS^pJF)SS;qp0zKFcu^s)@9HQ5j_o=7^+!cG&G*! +zliMX~0Tz$&Iodl~Hh!o>KKHU=C!2x=otb-x5Q#@v0nuHygikGP~)j>1gIv_(44(ndxW_8aPKVN&H$JL +zRARN$`3HK2Du3>iO`VDnh_A6J1eBt{&E*5h;G09Xi1Nmut9#UHF#LyvA(Lj@>F6w| +zFnc&A_W((l%+~w8Pxw3`$N%~&L+({H3*ra=f28%L4ej_wu~ybQsg{4n+7 +zB?jU=YK-*rAg-lPG{EcCJs&*>PD`E{tHJMsMUaN@s8J!o2^Q?y*6h+ZUM1gU7nIYv +z_x-1dFk$Htn|=5q7Jy?u< +z+tGH(=^4_G-L>f)9D-d1daQc(i$4|fwi=*72$pp^9iK0Z*AT|$yq6Tl?AfzhBMDZ& +z+jgJTt{vx8O*P&@#XB}S6dRF)I~sS3kzrT0$k-U|4V=w!n6zXSH$(v}j?345_Dp*h>DI(`AD3&*(`xe@t@IjcB?HU^T$ +z?3O%<9&l@)kwncHzq3j>A*NmhUKS-;yyjBTSgvrwh{?esKByo-Rf1GVNc>V`%>7P* +zQz{^9V|tu?T#?+Ov{aKOJw_Hc1CP5T4I67_@4%odbG1o(0hFRG3IRzTKnh7)$J#NU +zzz+fS;kp45j0lE0#6eY#Vo0q@A3#Z1h={xx*?t8qm{MjaT*XE9=>FKfxBwTHZbuq% +zu~5WTm6r{$Yxws(%zpv9Xqg?yB4ifWP&M@X)&#mVfm*12Yc_~)Z_#2Ylb9{LW|Oi7%VhVqbP); +z29zplY!pQWMX5?BAqkKIN$8;_^ePEW11cR276^iXiZoGUqlo@QQ2{}@ul~-tvv+r9 +z=boL}o!Oc54`hZ(hMD(yKi}su6Upfa8MCsl;hzb2l^ccBf&UPE_`W{jz>!*i9}WK_ +z`1nVdsM@yp028^xL=N{>^Z8;I_VyQzL42b6@@Tg!(Jqzc>aFs9=TITj>_jV`-f)n7RIH@-)fPsvL_%CazGOO2*<(ERgWZCxgnqA|~&4&U`pIaVlyY{WIr4Aev`~2y% +zvj?u81hF;X5=87YAV@_$sT|q}+A$4M%i^SI5X)u3@MaP0@Eq;lYvtb#KtZQ}j=5z; +zIW*X&Q-5y=%K5c2oY360E;SfRrM^L{VZkfuUsCd|UaPMb-|gP75ULL?d#7)CW$@xldbyIvdUWtzQ1I<<@x^5`kk53p +zOBluxU+b4rsDQt&RL*(m#*&t&vN3bj0V~Qhc-J`WN=S9vnHk$+vd9_}+Sc|<`S@U) +zh@lI+zE?Jtd;367g<`mpVQUu4{ceUtJ!iEbj)n!$)5FJBd?dq +zKppCiI(yP|9K66z__jA115sJ?9$(O&v^58c=Z~xFB8x|?K3_-;sg>QUsHwj9%We*9 +zpb59659maoUf*sVEc#l|jg_tb{7rDD{l}&wcB^ameh!`PrK}i_u*^VT;&j|_-}je_ +z5F}CJnoawE(D~7yt|~@#9C^7CL_i)DMIY9uNbEzW`9w{SJPa-$I!H-epuh~G;Z*Wb +zAz7(8nm!dR2ahpfk>A5&+PKucW)MJ}sXyrUpO@>zVtvrsY1(fzyqG0D>KZj}Pu&fR +z(4D4j5)dv=(bllYYG$70w! 
+zafm+*g98o$tyNSkaLj++0M;4IGA4u^nR>rn#vzf;n +zFZ`WDD9s^h!69@~Krm^J-OWpuhJi=0G#Z~6%!^LMvX&Q#iBrjvX_~h>4Q>VofXn%j +zKeAK-27DOJf$B%8H^<%q=pHJnbc*(7BsrWH{id1L1?TXZQs9^jDv{H@x{!=?Pn0(Y +z{W2rma)=AHPe+)6fyzBT;!iZW+gySY!IE!(7S&JS3}%up=rrsU`AS3oC@cgDm9#AIS?7X_gC_FS?6-rW2nK(-e)0z#LYj{a@Mw +zDh%r`wwMOa8gjQGXz3%lV?IzP-;$?M%x5i;&G3{Rqu}Sc(KLL?^Tk+rVkwwf#De92 +zogsYlD=u@)dtsj)GM$n)mOrdtN_Ge@W`XjXVwbQ4kY=jEhctN&j{jm=ER1-uIsfTY +zx%5~t09+oqb5(%)ky^yb){5e$93QuWL_XOrIQgHY$_*suqp9JKT%aa++D88f)#7z?XAy>p(^aPFY*zW?1fbvTqrrx!nJ8)Ajea}s&i4K +za7`x$`BWHskWY3f3pypZF_!(DSfT|W7z`IzpMFYm#hAbztzs0N3Ny2>tZFV#)T|tL +zi=Pmm*x_m8QP1P*otxT>!|7$^)$&+_3njiEuVCC4du>7=rsD8^whPr +zGZg^~l(vN2ZR4CBAHq!@M)jaeJgRGl)rI%Xga$2nT^7}_+46?g;6HBSl2kzCAzsBx +zQ9;2XUk8Fb3$gr +zjvZ{el*GzFvd3Ce3O)oY1!tu>C>t+Asp$F(r)whs5GEi?5^fpHG?KA(#`cXfjxENl +z#;Z^u$0(Tts+g5CXDp3%nIn@~CeD_Z&x&RQlE +zXoZ%Xs_r2336|l!fM!~7d3IAql?5$-Ec(aK)>y-~jy$@3CuxtQ`Q>VGdxvYK)^w{9 +zvW;Zc&LY&Ba^<{RtAqS+AJuHP=ie^xzTMT=VgN#GH)ygZadwve6#4R +zv?P@putSAya;!)hDhc7CyukGJVn=4RvK$SF_=THCNu9fav%%TO9yIJ=Qk+dd$5vzE +z0jpY#6@nHed|hARR{v-l?aHItp_^)hv#Ja15Rn>H%FLXg9JF*6=}M^`G!t_r1AsMy +z{!^9y+l7I_a(+AUSB71>>ng7V6?6-KfS*lv7YujNjKyKA6WuYC?s3yAWbMG@mCFw* +zK?!m_NSrv6a+$p%exlbM9w;uD6i2mRnVj_Vol-pJiTG+4bkzF1p3C3M6VN5EN}q7m +z3l*q~1m*>x=o^;PyOk-^KGAEY=W}=Byo)KkEBQimLEqNK-baBK?r8Vjb?$o_ctI@p +zNF?eTbG}gS`KZPC(W@1?HwC?;1sDFsG*ms`I8&9^yZ>MsO{UlENiR*+bHL5Wa_zZa +zwP#n2&;3#voZ3AIx;zL=8JxR1DEnye6JU0&8<6`nxN!Acpu`KUcmCD0{|!Ay`v1$= +zCH%}tWb_EJaD>z`LU}O~^J|2S4qU2f7*W%3zIInwtH6YqG~NU1lkDh2Lw23#G3QEL5PEw)ih9fYzc>JLETw2Bra +zT+Et48FgHJ>N{oCvPy4m7 +z)AjR~*_WB;Vit$fuK(_RuJvkVd0YOgE%V>H1NU8@9w^?oY5BRz@87Qb-t?qVrBA$E +zx;AxZMH=t=Yrki&4zEVa2@~@o0R78=T?2PQQ9sVBVClG4MLM3=F^)s+TCg6VrG{7s +zgWhezVi9f%Zp!cp$tHI5Wp8V@bG~==R?)#ttB(NlCY&W4rAe);NTE95Y`~!?Gp`3; +zs`~~#e>mY0a=r{;g3^gt#IQER4EUPCY3q`u734hL$X{JPJ7g8i*aP>H>ZW&YC51o~ +zcX?hEGep|MN2z-7PVF9Wx2kq2Am-4+(749u6`szLla<1PAn;9w(mq +zR;K<)U}yI5aVqH>kaDeZus2fC6f)$(N2P#s_T-$_Z&xuXeg`#hPxHpVRbn4iITMVI +z%V_KJaq>ooqMS}}k>3RtTm6rwKO8DG$`%~D{NZ@iho=Iai0Tu!s`2%$$Ll7us#!LP9_xdDy&pnfmPNR3vO371!>l684nPZr8A*XOw +z#jbwz;m-(^h#lyC9VFLF%Be3OH1_^dJu-j@9qbBN-`l?&A-tOd6o~x4G +zjp6T&FH8N&x0L=n1Q8zmU-Gw}P(M6PP8s)i9P=!jCU=$!hM-P3D +zxQIb)C@E=soAR01b}(+Hdf_(GH%L>m<<)P}LdSN|_L#@{GOL=4cOKumP914`0ny^! +zo<4(#d~PZA+rq6KR1`28 +zZ|JVO96Z%BzRbK?@oU|bahTRvFq+s2u@3S+V*T2;e4B1*+;f~ +ztGV7xxGy!xKgmP$>x28GRSQx<$=jom-}PGhqifRw0y^I9y)-YkGRROmk#4Y8 +zKvVX=;%B}~dldhwb&Xz80M2#Y^qEOw)18j+ie3xEpd0Dw^+U6D#T$_O73Z&g`Db=F +zVfoR)5mCK;KcP^6|1+=Q?uRuJMp+r2k7*8229*2vV^uL&}7s^^8YQoID7LT$S0c!cOT^?R<7 +zzw>j7f_(Qz%SGY!rqvE|kl$X*GlTDrG^~kB{a6 +zV|5eQp9On564nxVaoFr~lMMI_H(;zWe2yC+Ay9(20mnh|KUm&yT*RyoaoC00iSdip +zkGTPh6uQJ7oQkaZTanm}GyNmF7=&*WdgE0CMwl_$)8YRRq7@ASzQQ66rh`qn;B(0#_%un9^H(V&Fcp>S}R@SO^epDgB}J6+|*=4ucv5sQN^B`uKitWIVkD +z?U4{_elS|hh$1E#&W;{wj6OXTEmEby4PdhHK=Y;uD{i!H4)oros0j?M#uUOvL5SaC +z_pEwC!M+i8223|0%@ZcS&VcF5rUt_O@N8sWS(ejElU4!N!F9IdXeO +znRUKNE2fDZE{WnuZ#6fvnE>hpsPGn)EKxEWsj!!BR0m>oO|$WI4b(6 +zArmeNm~#u-j9_0!Ty}sbD7mKtL`VeA?BMkCernoUK`2s@z%8%6FtAiese*hZBppl8++uuNvj0IMyFF1-JM{+IL>r1G@GS +zUwOo-0@YT%cJ<&@o#*_%AN+(#q1gRKpeeI%vGgn8N8yn_ow~R;plqY8U!-e8Z(c

    XyA3`nMPZaIWp=-FIaJO_X0ODsDT5}nWSIejq-T^Bcby=c6! +zPQVenG&0p%PP#N1S@4TBg=X|i{+Mhl+qg&bb{#jes!83q{6kXZ=+O}O^ +z&ub^O{qgFSj=s|!&!juXm(R67Z@*3M=p^5M`Jm(7a!1dyhlzo74*J3_RTZCX7wbW( +z%O_8@T?CZ}pKi8w9gv)o`sm8A6-iU1aa0jv!nN_FDEyf_O1o2rDkJCI2}|yz+!85U +zxhj6_r0nk0ZATW`-X&k{)Ne9)I8L`*Ekh6V8<5N8}zH{dnTI$Aew(?;o+cf70rK$;soMn_awZ +zk9m7^U7~dP7IgWycLh##U0LZ0*6s$vz2P3+k(6#?K{u(rn*#8dm2RrGm~JJG^AIyB +zVs?QzF{)jhJRwe95p%SGSZ`03M^6s$PoSVDuf3;WqNix3r$qZjOo5PHSuwUB&cYdzZ0XHZTogD?70%T}9TSY4T!reQXN9i<(4ja?giDo6= +z*Pg%fCK!Bc2pzXBIm<+$OL;4mrZtP33lU=($p;`|!01F5@0$TqSD!<{^!~wv=JyB0 +ziH;!5?F(0l!34;VuM3nWdBL>ph#;)S=5Qp13 +z*KMT!Pn8^y%o^;KepQQIn4hz{iZ>2h>7RK0d9;LcfiWvwQ0T#IUXieMNIq*Eb=#K} +z+Tfd~sWkJ0$2klwlyWWQ^X3T&eB(igC)-fR +zP?$Z4*1fD!>w!RevgJJGKnZR(Uv1Q}Yj9MEP4nm-PgESNj~1WBpsERQR>=u8r1)N` +zWL0wxLDDJWiE)`Wmtk&a{bB;Go+)R^270or^DRcHKfJuc7bnUvQ}3+H9aBr +zaU19-M(?lx1h-nZ=}qKn*#RX_!-Ed&5-E2NP2iRXsyMWUBQLSvs$73F>V!3HR#LHU +zj7{s>`UygYmu<2YDjc0dXQ}ssWDd|8ea;?bbu3cf-sgy-T8^{jo2})qRIQpJ-E?W2 +zM>i{I;S*fpo-4e;sZCZ7M^I{G{!6!h_i|D1;%_M%%(36-Kd^ +zR|j+&;dm0I|{Z9I)zed +z-oM5LWrGq0Cu~+OMTNnqVYd3WZ{hD|WCS?X`H#)W6tEc?2TpbVbz%IQbP-Zv&i@)l|0HpfJ^m-n!M#Rq +ze0x{DU&u5|1}vwgchL&(wVZQr@Wg5FcPv>yv!6-)fSZ@0;wcPq|$1 +zR`oNsYtQhPG%sy$g;zvz;HcC(u_$%Vv3RJ)oV8rFBniW#Da(i(r26NQPZF1UJr!>7 +zu2W^U<}U$Bm-#=I(;aPNtB-&fZysP6H7=Hd2ldZ$uZxl`mY4c`aEzkwl=zcYZwJ_u +zUrP{skOx+y9*Nw(Flzj2vQ}e+*op~dRPks>2T{*8gTb1(?hTN!% +z(IgrtXW@x31i~pw;)rWUF;W|xZKOmBjY8JekB%rWaQpDB2JWN4i{&8c?TfqX`FlS8 +zVKF^d8XI$eeY7>e$qXqpfnz$@fkDd?Z7%W1`|GY~IKPpePF=W0mcJ5tiwuWel=@#5x;>f^R?p +zM_W&_V-h!^e$DI}!9C#~j>rK5F`ZP4A!|&0f$OhXJ_ZbqyY{`50g|=j{T#`SNi5-Z +zO#TGXlsLD4R0BDhpjZp-p|cA`#T!tE(d3stMbO&|(eImvu{&>D-1+tQor+9txuBnIkaLsqQytG>lDN0B=%t52=JVDd_K`ner)2AYu1<0mqE`F6 +z6;>ubpItI9zWeEw4nbdT-%mbO=4GK`^f$=MrF#9}ufufWzu$Q=4JnCT9s&33gF-oS +zk4Hi^OBDM@z~E8BHhuW-g-8V+b&WyzPl-Hr4!XxvYFrJ-SAzG5kQ;-e-b^XF1yfqM +zk*bU2ZheZP8==cSc1Lq$tQzsXn)D!^5lKM)a*d&*sB;7)beeL^ho;Gi=ys(6H$iKg +zdjaf)rw6J@8yUc-STVLliak75lLM1CAm1R+%*D)FSh$iP +zqCzeH#7MYwGih#$^nN5l)gUrV5D^29&25VLDWsj0Ky2%!b_9pMUWJkNEs{XKtb;7p +zb|Uj3S1z=KiFZiQcZkVghZVs|S74Mdv&0y)*zP8RV3bT%W8=B8z?DfOfpT2PxB*Yx +z|95=y5=sZQCz9^StBa(==(sr+sRno**3A-`v8SgLTd}0p?qruKx(LQl6(ovWW80@f +zu?7hZ?3lgqr2T4y!Nn8R{7;{z-@uzKpKW_b({AhKDkNPHGWyMS^ +zl3rrTJ;AZ>x#=NpsW^7>Lzukm>Jmv4l|DU{49-o}G*4+BiBd6SsUuhq>@$H#>NMbc +zoFnelRZMZq*f&M)AK@5ja9+5jKUs(?WoH$4#~KlnZT%8@eQC6kxD0rfiD6b0mux!5 +zbcv#rVdK&lBTWXAO1~=sPy2TGw3Aga+ZSULT9~TX_;5tdG9kT^m(k9Z+mcKAwV2{p +zML*+12d-WW-C_`kSn~vOL_&;RFgJ*$;6hZy0SlCpP%BnuKNrSZOxc@D3UsH$6DbEY +zqW$!FW7E9L>bdzN86jedkwK=TV_vg;&d+JeQ4BNGJvr)ct2gm)tJiQLY*~=$5f$OT +zL^|V|^<mqzwuhHBzsu<>ev@^SNm~0{T%A{hb)xy?|&ILV&hQ +z1x9K-m-Qky=bd9%mqt!rZi$IL^Tilg!6+c +z7FDsbUkRj)*cFaZ83!%oeFmXY{xX>H(49XcceTcv8gZ7~bAIu&do;KvErl6lh1M2C +zV`{D~sSFG%MeyW`TcT9QnX=;C{xRmU)~LHI@Fful7g9JDg*@!UV9jKECYJA974yJ; +z#jkjBzO7N2W6X1TB-vJ;xF;smr!oXv#c0Y5Ypo3GE=Mlq9Ww*hflBt3<~3asDE4H& +zS`5`EMJD?*O6@Cy5LK_5uU;IviXv8(MAdlm%ggx%3p12#O;&{yt1_?l5>Kva7`Omd +ziblv22Wu~?6&;$++0@EEO3Ef^vRLB@YZ3gfer4U=HQQ!tPBqm&SV*j#u6h8gj%p$i +zMaVtWynVXaf)eDapeCxjuFO0Vd^&QATTb-P(zmB5?~xP*gQ7$WlJ7|2$`W1o6y+ae +zM7(R3W*$kL81v(2BnkUx1tl6%+As!hkWg-EG( +z86!)h%0}#aSbJ$X2U?SQtgS-6xABvC1d)2v9+kGIlYMFqQNR{zTNE3?%Lauh+@u^D +zK^V@vW`bbOwl+p{Ba>zu8~m$!@dfd{;pI_f&WZfe-i!t_DgEW7Q*MwWAb721SmUW2 +zojk&w(>K{pY(_pQWUAFMsy=0`@bbZM(J8{93?sxJh$4hJFl!8_5xna({1 +z92q0@Wo1~P>Ox>v-{lUOho#1l1Juh``!4@tKIGN*)U^i|uJn`|2dXvoNOuR^j_fTn +z?z!Fl4=2=uxSQ$6Tb>;ic+?m#*eq0yL+&s{ZF> +zpyTb2d}ZWb$@SOZ{oXqFpJK{PCHD^q`#%QyzfA5wXNm+=qmzLHXNP^3ocsUj8#t>v +zFdum7#Y%skUcdC^OUr!&eHgf)(!X}n|Kw{=Z@m0kQ*R>|Z@bFJ#$As!Cm(P8;f4Nn 
+ze#5%+K-fh+^2yH1Cu&_!w$?qdmwAf4qxW>zA+K%GUXG34hq^rX{(6dAH)OMC=)jpF +zyXYZ@!Xc-QA?FuEhyUwImw!)n{)?NDv8*#=IniUh!m+%Lv4R(4MZd;M){U3#8Lv1q +zUKu?uC>*cu7_WUXe(l$|aNTqJb^l$BIaqwSqxJjj=#79^{T*!!pC$K_R@Wc7-M%zG +z{j}!X!0nFZg}JxA#YgVk{PPq8J&vv_1^*+ZVa^^kPlW91~^Um>` +z%=N9Tn#>DqoSDoI?lPPz2p{&FDkM%;O%+jo%uE$iWsIgv;#B>oOWAtW(`Ct~AEwJW +zc1AN5SuXxFS9zDJXDSOKKg{q;m`49szp2xzJ#QSRODkkg;CzhiwK; +z`zy%-rj!JT?Wkv(;acpmc$GCn7XqDGp20m3hrZX=h*w?n422F)4rHT-$Ma(#bvC$D +z`wMQv^>zmw-o_N~06e)~_PtQos(+a(1)IfT`r6?@PGzmmuDN?_1EIPHK$9SkwMr+5 +z_5Ga)Lh^l!SzN~TjZGBE^#|xWRGgts_{W`hH)LVDuSpwtym_%rYI?S~OT%DUED +ztuc=uNuAyAeOGlYjtU(k4#;*|ge9-O#(~x0>9(yQCQyisZ5C(Y&Ah3kQ_Hkj$_^D7 +ze!SVnxj@o0nr!m>3S^tH*Nt@+=MHm33czdLslxpdYb)u%dko7;rEi@&@e_}J{)24$ +z4N)4;w2-#(I8XTAzI{bT%Y~<~ +zABDdu7xPu#w;_HeH|X}>W$B^dC>eF*(w{5sX85>Y@uwl`9wnB)>#s@S;Wn=o@X`wS +z0cu--XzkjgSZC;KsW7VKA0W3f^^H!BgeO(1_q`Y#V_3h}FzA=S1=-l$3If +z%U!7dKmb-ml~-l7#oqtg7)^tzHe8q$99S!>4_$}-nm+&J{B|q#rme=WGkC_YS8YOCwAtV+ +zo(BusSkiSDI9v)ceur@ihS-mO2~7^ZFVA81@MeIRYuf~vj;5`B7n~+<0BB|~R<7p-BwbbAt3Iomumr^%esfQ^9 +z*W@F~x{oI~t89k2|`b#0Y)-vCO@tT`T)?UD#w(Eq^l<}qV=tFRMQoK~6YUf#Z=d%b|6g2pLxUPm6 +ztmym!q_(RBDwE_14#o5rZ@UlOCB5I)Rzh<_Ud~YzbXpdE;Dkb{Fl#Ig(ltmmciWvR +zkG-%Vuj?608zd762dhtbGR$@v``#<5hG3|S#={lb=SFSZ)(yfq?@t8ThT%}W8P`N@ +zG&rO%V9laXLD6lPX9j@BH*?p!T;kTbi>=)v@E`+RrxZ6BstAAWo-!!W36XSOr(T5F +ztcuHTX;-n#O$Sp{K-9YC|BskXpG3xz2Q0__UJ{vNBmmRFKscSR708SC&~`GA)RNb0 +z^B3>y9u7%7()UKSX7Pc}mHOmEau?JG7P~gS3{E+jd_nv3;)6X!U(%wqy)_ig@0%Cx +z(2B&tb_yZQ$JSISp2tg{i|YiQL8v)R?e9%E=T-hNoo?sJEy?uWKh>BPE4U|TY4uUi +zZ7^oo*~bE4ISdh9OCmBz>yAha|IP<a +zkNjaej6Fag_`*ob;l>7T3Rt-vCv{t=@n%9DeHWTMHhSsDh7+hyen*~~h%EK4h4iH-IcaI8b;+I4IxslC-%Bq(~=qht~8|0yjaf8rf~bq)r`p{Y4-EAKI} +z=5S$fVpj3WBz7R`5g#>Qv}~nL6Grv30R771$~1<>7))6yzg__OoTo{9G`31sdgrM>1{8lOhjM=P&R-!HD&kPBY9KiA)pF+X3D9mdb>H78 +zFt9vupbY=2k9e`~_V)+-q8?pXPZ@99I~ywAzd9$4Tlr}DVm%$-F=ma5ugPEkk?%?S +z*`@&2_gwwudF1Y*bP3H{@@hS>%I?I(9GApQazo)L3K~ycB_N47=#fE)s*rNPT1tNq +zQi}m6awGUCK+Z#Y)kK_7qugS}2AWYjxddG}=?w|uR;79QPaX_T2ADyJn +z==6!d+#DIaDj=U0vuIR$LlafTH_i?nR{#V1jIzz_Bj3Wpiv-bW2IOF~#KUUv8K3A+ +ztVB;13tgg^-OR!X62PFpr$iV4UqMq)(f;<4=h)$8qtvV7s8EB1Z~=?!M%nF?L|7pD +z0`_MCd9#>x*(~NgfjVuLY`;L4H;C<#ivUe8mwWb&N+xjnQjIlRI3)-e5C!!_y&#-Ls3QGLX~I@4oczQR1R+ +z9NsMErf=LY-)OWrZG%RN5+cqR6Jf$hYI=~m#}oQl9R9p1_UKIV@snE4w*oNIelaVzYEeXyE7fud-vd`4-@ayaxRO&njws56d4L5k*z_ +zp9F`bkwY-c=@+DNdeW-&lN$Ji*@lIUJ*?QCC|VC|s5^7#ND*wYs6ik0GAe()L$0r3 +zzC8@yOqKhCZ>Yshto+L9d_F%Tdo*zcn^nLslFnnG5D^L=LQndXX;0_P+LzeDd--4>5B%LvWm@R +zYEpb0E2K0FAo$Wmzq~7~yck5$V{ApgZ^hp3QZN7C@Rzv>%QSR0%evE&@yXMSiNWoT(bhbOE$5XJJAUI(JI*UjLTD-*w<%1fb +z2KwcvG*gT{i+mFb^QUWF5~|MV*SR3730#g`Yv^bHn(?2tZ~4_I#I-AGS1DK!qM7L` +zW=$D!fyG$oJEe6?>6>Bl)xJ5d)m)V^B5$mwpMCv!cco`*4cEO~s;AZg#*v5D+4B|k +z+OBtEu7OvDB@^NXdwYSZMa(=J^*HsWDoNCx_|>W6R%^_L=NW4(BhL9}KdY+6 +zxm7|m;kLeUo07s!+Om(gC0rQKkRPv$Z;fpqL}(f+16^VRD$M>lN9Y$bD7<;e^;&Z^ +z`Dj~q{i&!VB=>}7gCd!FBq=;Y`v0jeO3@))qwxlWsnpO^nj=55NT8-@FL;x7yo(y;{>OhfC3+^Mh3#UmkosQ +z55vKodJrQaeC>$1$>oZXC^$ndP+Bd7=X|Bz6tOfMywfv~o7|J8-BT!cIlJIt%1TiF +z#KSu-!H-lQ_WZa~;oQsb6F1iea6BIVwCl-{>;0wITQCv)rwJ^9iRK7eYAVO6a7D`E%s*PX`%d%W&^ARm|{OW6_f<^NhgiMI` +z;0HWFS>p^TM06nHB32?+m3%~IA$c9R;adQybQiLK(NBthq$h}!g@M))MjtH6~l+SCK +zgI>(;#mJI9Q&X?hs6n@_fku8;P&zpn2SdXhlCnJ!P58X8lV0(cmER<#7>{+((K1U? 
+zAP9F3s%PRZtfpDZf9Jf~hm&$r%M!5xB-wF*LpAa}3Y32AR2rq?&E+^*CzKv+ZKd_n +zgbW1m0H*KEbyQSF!vhTTtQ|_&z)#BE8d4z@I|#Mp;PRy1J5?{e27P!OqApL|znzFt +z4A%745oX{thV5e6uY$Mk1wFb^|(>S#V)1m~ntw3YsNey{Rw^T9*}cL){VP +zycrJ-;O&yePZz+B=BMkf2GX=Uu-4#IJVVilT|3*+>iUn5U1{=3xa`%87)OQkHuIMzBLu#h`kpXk~L@@@v~E~)j&>7CS#Lcs2F +z_DeFM{jK)0dbgFr&g59y!(o +zAE^r7p7z6)Hgvf~w9LslTlGF164DG%onYp?hC^ra$VBp0rFAY$2pqOhV +zQ#P|pND=5}nMySCC~AC=b5^>vxPIEHcS$3?|C`F7=gi@Omi}DRUB#9!W~@QFL!3)8 +zRa>KFc3EjYBkFxKcj=fm@zfk9zr0!I=*7e=mi@KH+(!Jh#;g;@DbE1`TkP`KV5%t+ +z{}=m=kM=h<-T<(VV8ggX!K|783#{Tsem*_fh*YJ4L)t7S3>+B9Cx<=<{Bg+;n{JoB +z_{V?5^2LJQ-P^Fk|2VE#z5rysO|WZ0%EmTQQ+u&ZDxxdQJC-l!bDA4J|DyYy_L%sU +z+T5l4@^V@d8M<+Gj-_n;n5K>@sXtg{58^zM&Pt<>{T8?M6K>BRFW7)Yqo{+d!{5r( +z&Y&Pz=>cy4x2wfv(spUBL-T0ZRkL3o4v*k;D*7YJ^gE~A6v_QZc7XK5J7xhx@92Zn +zh~gXQX&-K@QbzxeM&Q^Api(lmXBR5}41C0GL#IUxxBmbYYMUgXpML9H;h5=^O)rGw +zbd^U1n2hwpFP_kQ%i7aP`WA4`S>nCfi7C8d^y7sqB;XTftMx5jpN?I;-+dN@7SZ4O +zeO7FGb$0qN9J-5mXWm=#YXMB%KjXK$AbO<(J?<7W8=Xph#VGmmq}$@5<538B`U}t_ +z!mTO-+k^i6-^zn*`gcb0uiM3U7sW?nmS3!gPj&13Z1cIj234{+2-tU@5|GC|qdQUX +zc~=4-1pBfOai2h%6l6&#}&6)D!_3v_R=J +zqbLyxUtq8v46Or36zkLE1>q}v#RV3qV}TB4Q%(@5fn2JrL)>`+QqeaW>p&?c(CA#6 +z12F=4lmM1GJ-h#Mo?yC^7%ZNGp0sD!u_)V)A{+>~(54D@V)(mIuRSf4!r2xD%8x#@YTs&L1L63sSOKiB*Zq0lleYLP2DlxM9LjL#T!m66GuF@XZ{w&-kD-NA(Aoy +z4-A`1SC=iuOJ#84fq<_E8zx0eE_G$S$3}(!f +zH)sAHNhGVM?{P~!ycll7OFk@+>V>Bch%;{p(q9-5WXx&$QL##fS)T6UCXyssAgeqG +z(I&>gqsZnnao-7y8Mio<=A5HytV%YelaryCz!FcT9rgvbs?lz^bVWyw@o0|uDEGHd +z*13eN11tr=41@#xyn}G;R7^n?DRzm|FOFm6#+8a0^=ero>I@`8eoIvBsa(b~419Ae +zH)n}cQbpSB$n;(!v)$8={oUv!|J~@A@$>9?X@1z;CSE$Eh5k&Cd)OhndetY}LPL(} +z7v8ay?cttv&o4RAFh{W^(Mp3GD*j*a&Oh9OU%r{3D6-!Y{iS=pWQkLwL8|MaxM`3S +zdMKr0`dN*l9asjzH@4a*WxGL0X-P!&cW18% +zt;9uk!Nnzdzo6WPOa5dY?YT(aO6170ODzyITvf%>?urAjG63v6buYZ!EpLx3t#`Z{ +zZxZ0e^`ch(zYhWY +z`G*5U375_#&k!!!S2pt_LtCmHjQCSN0w323#VTgnLokMvJER$3_meYPO{gE|CyivR +zHW!%~!L|vK>H2vU2xYu`SbttQUo-x(AYFRtTGx1`O;6ndx7tt;v(~-zon|bVMK|&* +zs$Jw?_JMwEB|gwBE_LPZ5Q1>J8Jm_8?NBWSy3o +ziEQ0ObWB+!yP&T+F?S$gr?$ptaZnwHm>OL?MNC+!aHD;$xLm8GeJ*ptjAtAx*b-fF +zX+Y5Da<6-$qRb`!0I4jtCCLDpyv;DK*){Q$X*sU&E@Hl@Zt8BI=?z=+dwp67ZclH+ +zT&u_7_lnrn=ShqAE@O<}X(Z>+aM0rJFieof26oV49MoYZJ8hym$FG{M&|v4%=)X{^LTc1l +z-L$>no*(c2$Cl>bUKs~_>YB}4HOD5Rw&O!53WbSp^~bn$XfE87;VECd1fMg0da +zCH@RDn{A1?-^x^MQf+P-L< +z2{{XFt7y^`xKZ+cgmbZv74Jly3z1i3VB2Hf`#+BJ1D>@SO4`G}D?S=&msoT}IJb9D +z{aWKX!sTJlgW9`iI)a@$hN*r79-SBSJBCL(re@Ac^#aA7V0}ju&-wItwY->7qt7PZi4u8jEx7}k`Ee@f{j((Ey@rpHE8yF+S93z*6>pIL?Vo0Z57+xN7M*Uw)D(`(F=X1F6gt@#f9%Zt{<24HhNH8oS$4h`65=bBSnqsO`dza9S$EVcLO5w>cfs$C5iua9vye8=LKA#*HQLF0~s!a93t4cPhgzmaDEfQzxkXD&@_Bwt9F9 +z!W^%hl0Md*ja1p%=IJ}zu%NKV*F{MgcfIOIGS9Q^-B)*xQR;j8*r(m(Cd73U9F7Un +zISsa#(kj1)5}NLHDn$jephyTqXGC&A(-!+oltl9(1~Oy_iNTTX2k6fiG1F$y&#Be+ +z4*1CBYpxo~-gmQ2QZF)eB?PeSfv^cm2BGZn=}>bCbA`r9eGQA(Z?VBh6G6AWMXbx% +zEr9!2oKP_P30Hh^PZ@go(zRo6?J+WYq@u+MeIw3m?d>L!^7}8v%etZRWVuSK9SJ8l +zV~=9od|6nHu~Xr;dhafX*m_@;5=}gTjbBfQRp+I%D@88qRkXFXsJ9@8!N(W^-!u?n{vPeI +z)`WZVUV#HFAsW4Xq!HY?16uKKM|6;Z^*r*w`Sp+t} +zjc03kPb+*p(u%(fkgDbA^~{Z5BuWJ|u;<$pLxn+z<`e8wl!*xD8Qi?A`BHaO>IwvM +zwt3mh@pbT9m%*CXTw&D7byuvGaU#hLBsB1VLvTn^+cx%)XHzHsQ7MmsIU(;#3*tyLPjlbZ@9J6wPEFj +zORG!wGWG3!9WCCu)+`qpBDQZlx4oIZC;lUFk9CcS-hv&7b&{Yj%s=nX2dK`i%V}Zv +zT7jVnT(tH-*J?b1oGWPb9rrzs8DJcD#+whtX?!kt2r+qNyhpC++QAjy-I>Z;K1ErI +z3GmIHj@G-Ls;bK-a#1_+4aiJ1Y<(P~%HTu`TDd&Rkf#10%OCyEAT@t(sytq+c}2_5 +zV4?aU5_$Q5BQ^GacX;#5ei +zPMhz}c;Z;~IZ<15+Cu%%Q~dkSu|p!LHJ7gcAr*z(AZ1|oH^{V3;GpJ;gmLTPh0c(z +z3^2buX6q8Ek^ibmqm4W0E_N=h^FliOoj1Pe=C1v!-Q7L0;|Hmyi*q-Bk}AK~d$G@p 
+z(3HQq)X$e%+siQ6Tu2X|a$ZO7EZX|@mQTmT>LZ4Ig!!{3e@!@Vdt~knf`IYACOs3$&#vQW`>?;Jyw@$7Cu6^r +zQI3LwMdbdLoU2x_H`D&1iyEoBzE}C^g9BV_mi;vRUgdXm(j&2M;5wR9JFGt~q%lSx +z8hn44Xa_V*e@Jh4LLTb=o&c`0mSsMe7Xm~jVGe$m(J7Y=2c6mh1eDH#nHk%DPZ5RG +zcXA-e1Ha#2uZ}NH()(JuKoA&+o_Av;?`g!0FhFWu+0H(g-?44$XGZzY +zN%fZtU3Y$chmJAFPmTWGp(Qt9NyK6i2mB(g4nVqi@NaOM&MXzu4QZbv3UQ!*9_5u* +zc)b=4S`ey@r#t(>dj24rajzw0V~dts0g4RKis;f(3}e&g*;F@a;zM=_X*wYGzRdesq0I=qn*Ch;wxxo>Cf01DR1;g%Pu+@Lz1|IRNW`s2Du;iYxoH +z7&uP~W#b`xW&dX7a0I9w1co!8LJE!onZZdC=D|TKl)wq`X0iEK-RG!fY+9RuU85E9 +z4?H4zJWN&{_8UjCccp^(oV03Atv_X{p(ODFc#WbbINg;|hNn=)bTONLE;Q!w9OWWE +z1nbKIiLXBAhkP}SKWs|u3S|JRAwR3*kMqJtT|h;<)00xV`w;)&XK9g1YF-9u$WOC9!D(@%{+9B|kya4B_dT^nyd*&r2wu +zqatYRN5E;vtU_h5%HQLvO>mmjpP}cL5W%P1LqrI>!)jWSgfeb)O-uAJFREsM37SyR +z@{Y-MrE6zUid&PrOc7rODQ(>l03a=suI-;WbI=5EQ0JSxH0+DJ+BqK` +zC{DXoz=`9>TL@y5#9?bPD6bK;cO3f08v67^+EJIxVPQ&Tb^0A71ZkCdwuZOs53fo3 +z!)qMoG6k+l&p6b3HDTS|{7P9UuaBo#XGN*62|MDJy%!#iu}ZHTPq-aTePYh)D&Xvr +zDB&k#{=JdYIhYZcf%j(7*PRc_o$`f|V +zxH*VMQ)mTgB=~}0Pcp2%x!VJX2Ze%qvl}fnv~O)0I-sm=3pXSDt~t78Weri476hJJ +zH#gyLrtxxSThdy2skKAbRt3X%q6KF~H#O#SV=&o!y;GbPuWmv`wAPR<$>cU^^e9^} +z8yb7`30W~FFI`h1^Dc0xm?!*mt3Z#D5dk$s +zh^*})jY@WYqc*RSmo+N{%cHLKx?Lw2@|g>f4mJ4? +zI7wxPNzQ>0?*sU|=JSfHiy9A=#*W{r&y2YCf}MDnbofa|?Tg$^6VVrmnT1G=gVAXh +z1`6*E3Fe75y#^&PC +zyVhLPkE?%`w?2(O3La1#0uiT+IF74X6-A+zS%qqKqTTMyKynIbKJ`J&-7c>RUH>~k +zzsPyA{K$`F?-$ob(P_!tMAVfuhw;QIjNGVcnGBO*aJbr_RpdM=g7FLX4#x1^86$!k +za!U>F$!&6M`eC<>^}5$+Ql7?h)=xa-w>MkvY0}V(+QCfmAiLJCHeqzGDqFky61@Du +z!o6U`{hFy71K0Z+X0?bPcLD%jgR6S(reTif1AKF}O@zFxj!v;I_Q@!U!B=NRWip8k0a=Nl+z3=UCX>%bt92$uh}4 +z;6Xh`?#t@zSJV7=s~)T?&iO=n5K0Lx6hANuyccpf9=Np~@QF0_*SNVyn%!E9wJ?;4U-hz)v)5Q4;S(WtCjt0`wN=07bpg9;zFOhWWOl8_KZxO5`9G? +zN%tDy-)!t|IV>gZFVuMQI4EKkR^HvBZwg%{^Srb=^uj_uslN>1pu +zGZ$k$p2lZi +zM4_LWv^>c#xggTJu%zA0!Xju_U^pMo^0VjvoKlGi=kf%8HD~=LBLDl}{UpI1>?nEQ +zpeo!M)j!h_@?EhQ-TC@R$NMC+K|rQ}M5VbN_MYwa1z@*SzsPbFBZA@3PZcYfibR_gI>o)zv>`@TzPAl!}JK(RjS?y%v`HfYS0&vlb)$w^Q1C{-<)t1kjQbF6%G1Ah#FsszX$L*J7 +zJHM9eja1ObOzL%}mA+bkoX08Mw`ZYEJh$#O^TAiZ)K(ID#qamQ%huN2j7Z(3i*d@b +znilf}O>15@kM>mW#DnaS#c$SH!Ul~cP2dN9cR}}gKXK;HSj +z4LAyzEmE8{j2Uz^MDq!6v=t8dQ+yqbfOhn)vdA}Yju@!PLrCQtr<9tY +z+a(e$4>1=pauT`sT<#|)9DFAx^7tl?R=6w>w_5)&`a}@uC|dO-#PDmR%Il3r(;K$L +z;w@ASb;T?*GqXnzuUnj1U3b%dgPh#!YnQH|X#|K`gY9}XCM0msS-ao9e&gYGf6HK} +zu@P6U;z*;?v>-$Ooe5;oJU$U-2$T0X@z_406s+e7HBx9~ngm5ABxD~pR1a=-LmBJa +zi=A{f25dAYf>4{WOj9HTrHGd@ZrvZWe(wo#!Hr!g)^OE5dx>H7>{DF9YKCs4gn9kl +zW$Y%`W~38(yUW3I%1UM+jE-yMsb#lxtGn%$^+^@$vpbZZR^;3FciSI-@hK9G+pbfo|vWiaa^{Gnc9Tb15Be^L<*FbnI;9Binr^%Ke3k$!y@u& +zW(zx{NPVDtty*C2^g_y6u2@WNs?x5;IgVsTgKbjtL|TU90|kC}uW9(ID7{^FdW{2J&(%ooy{*>N{h`6&3*U|2&F2xi +zK#Xuo7T=cs-Zw8EClNUe$o$8#baSUlFW-%mq9kqq-xgP +z4aksl?e8u#VN8*O^5t3j=%K%Jz5g>|4scBMSG#%rC#nQ!H+~*}VNU1Ymi3SM7GSRT +zw7*1oL-z@I5FP?e2j+Th7TR_?N2Llq-mUUE^u)8XB0YopZfz*(sc%#Tuf_HqE-M4P +z!(?Md7xnDsI~lSL-;gi-9>wuBPZyr8_UXOs{7kieX +zO3TCKp`cTm{7#@{T|c=RbgEaNd@rvH#9SS`_$+(NW^<>GK-U{!rGkWe`FChh2Zt9G +zZ*lLH0gY^*AB(-U&i902FQ+!f83AL&m&L&jffBE+YYlA24!Cc2b;r~$7R=GOXjqePO +z1n#Ob$sAev(TR-`|5%GV+KV!?es9AH#l1=%YZKJc*CBwgP5P;~O>Eow<6K +z&S08?-aFN8#KuD(|8Z-T%6*HfX;>Y8DRZvwtvW{B`R3?Fqxt*qo%oOcFWN)6oe9t$ +zzK_C2euF8R246Yo9`#uo?yTS0P;a?viH0a2? 
+z&N!3Ri(F%Rc!}*!YjJa_oG=8-4I^0`>Kq@cCu<@Yx0~*U+J +zNL|9^AJ+KF-tJt-`3m5kl9mUrIzCc|`nJkWR$sm435B7pfu7T{?xp#KQGrT}SNLLe +zL^>~=Bp|6ygaSh~|7b;Md51u;h+n3Zd!`X#p+pOGqyPv9g86uyxbf+d9G@u5N-eb|&Z +zZOX{;VobKs3G;LZSKUAh1agY@19mNQZA`DU|wZe{hhvN9r +zr{PL3c;PSEQDdgz2U;Uj3s`hB#CARrxJUSxyd#i%#11)!P&z*!PV$R66^gpT59cG8 +zza_*nG5fJMdEadK1+DmSmzYUYa{v6bd^UNK9WgAXipFCO;^W?W$EH|MBI+ElV(ffSB)=5YX;!8bk_PhF}hwg%7})g<@8*YxJEt_G_;Mf;ad10E@tnW@n8gt0N&oPYQ|Ia5=eTRl +zQ|aPJ#G$0;>_k;gTslBhhS{T +zW>s=9oB{&QIrBCHN3|T(^`89(;>iv`%pP?>YXU +zkg}e(XPYW`4K)nb`tUo~3pcqA+a|e!FijZwUl}G+KS- +z)w^%$7lq9E<(2wH{-YK5-7I63dFF4KAMqF`Z@qc`Oj}-|5Ijsup%L>HqUHWE&7C=v +z3!DosVW6dxGzbxw6{O9nLT0=>6gHl5^R#uwCLPW(>+J9F^bH85R1{g>I*cwUD6A%H +zJrSIW<~C%8Y?R!5K7^?DO1H)oRKnx^=Zk)86}#<-dvGXS2v4cnQSizyguR$sK~8H8 +z%uhp>eV8N1So6~-l#YS~gxpLQamtu^>3$9<**fe3nKVciTqfrh9}X)$oZi2RBC`(L +zfg%srrd(ajnX-;}y%+_Jj+uyAGPRy9sE4=7d4%RYo{Dg?PPr5yWTA?zCKI7Jkx^al +z4ets>jfkfevC1dnl?+oJt;HN7mYuVN0+F@n&;MK^&X$ku$X-^Mw+L7IMJ7EwT+k46 +z$Df_wty589$~`uCH(|W6kC*e>G)vVV){_~ovm?oR_sBf5<1#_EXYcZqani +z-QT7NI<{)*b$vNbemR8j9g7oHmhZkQPgST?jK==Kd6lhf`eIP +zrcGF7Y^d+FXiIU(Xm?d|chc5|Sc6zNd72zw!!p*cd|@p-G8Iw1DCqi;_Vx+z$)R}F +zq529Lz|y6O&c4+Z!eWg8hR`NUs({$GTKQiMyzcug6Kt0l?y)jkUKD;$w&KjN9T9+(z; +zyH{K0meuB)?M$l +z0yc%idb>>e2A%u1=m+5Idu_s=CBE+qr7az7jmlqmZKCP)7aK|Z&gA=19zQ?u{`q0~ +z{{1HX$9ML7pY7ij)qk|K-#z_o(1-qW9sRA?fg{TXLX-yn7(o6ElhgmhsM7zlft(7v +z;k!qNtHOrG*N1EChwr}}e(-BpqCe7TH`07`V+_ww1% +zm;GTc2d}?;QU7w}<;zk1UoWNlqp$5o-yR)(A2vF1eRQgRbmryg$6upe|Ed4{?@f%% +z92I;|vUHX+ulOH&O_elR@cet-(KuUFKLn4XgEjR=u#I}uBWlgy%**#03~hk{6?5}c +z*^=+`?2@Ar_*TqwJPxPov_{Vs8wp1%Lfgu@n}Va!8WTGXSjEf&1#0Eg9*G_r8#{x$ +zLQrLH>2%l*Rt)h4$-}o`?Mtd=9E>72*AJ>**f#?cZAY8pf$@`-8p9Xrrtvho%@?Bs +zDrO-t)kxj#*q%EGne+Vw1>fd9!F%2h`aD9qf9;{cjWD{GnvGJiDETi(^!CG+`${=5 +z&p7Mp)=iEZ+N~||H#tx2ZmZLD>_z3xO4F&JOC7F`6`d6(v)TLbmwX8cFgfpyEqaEg +z@n1VeBd0g*`~s6st2%1MGyDA=lRqFso_*QuVJud6(R_PkFQLRaHoK~S<$l>^cdLul +zR|%kZ&NrK&(5U8P)XjGwz}eAU>5vC)3HdC1N@B- +zT}%ceVOYt7pSMfF+Yt62eh;WROW(*V;e6jXb%QKteTj-A99uO*)TmqV#VaCvv_q18 +z!d*i(R%<`JrwTul7QABQ3Jor)Zp@5dhfpQvI`g|@Eek|Zt_;v}*&ueE{>f%d49x*a +z)cis05ZKDqT0CE(=g^W>0Q^zei4)ni#if +zH64M>eH_z#LU--Fq24g-J`6#JUCVZrzfcbQ`Qg*?T^0oN5!Ke{*=~hXw#Ic%Qn^*F +zcal`WE^2mWZXuww7-^y2VMwcMxVukA@i=(~ysY5Fo^YyRSndv|n_3lkQ2mRYdzl$}ItW +zp5}h%{653gfacV1?~eU^E+i8Vk-vPcljTWJLV|=gc3c_6?1&L8+Mls7)y6e^CXt_k +z8BO#<&5cwAZrR4toM(6PksIN8Ed(wOrmGSmU%wUAM +zL7LH+gl7J}yEF6CUGOx)K@|^*@%a;o!a;SE +zvq_Zz^4Wrx!{B`7^aS6^wAS0N^V?E!0w1Jr&N-)~mc&xT_OFPMw5X)Ftz~bPf-?u9 +z@-sN*repmpCV?X~{PdvJCW%6t*hwjJmPME4EHieyZZHugN7rrMhzP_$iY}~9zOcUm +ze9d2S@4hE!=%Brd6kFz2(hW099fMmF{gtC|dnPkagOSCo6-C`q7l>XfjVI(+5>7n- +z@*kd5{vRD`{hR*tA0$RV;OVWA?n0~Uu8Kc_Cx94vCRF?-Mhj280b*3@01e^Nm2Cu& +z4UgMt8~#ixU&(A~f4`*;IMfO{D^Y#sF>c+Z-ARn9%${!d+dRI|b^c?epi6PWL1nR< +zCI2-?<>k23IH@PDtTJ!8e!$kfFu!xnyi;}h*pC+=KcaIPd*{q*!Pr1+$Tw|8km5<3;eIcWbA?bT2y6Re +z$jevWydZw&(DC6RFLC2W@vmv1^EE6rkgJKXL-;<^c_~i#UKN|IG$|vjXCt%`P$Jnx_eMQ339MC(9t{R9*ZsewB>iZ2Y4MrNYE7z-!$g9Au +zy8-^Bsqu0Gt5ws>+e%M$412ehz3^=3u`gQaUsDF-HC@%6-{Q9EPptwunVbYGxC28w +zF#kJyfd%Ktb0_3C64;itSlWYgQjOn;`%?Js`9_7`8o&z9D=lDF$>tbn4e0)u>9&QX +zg=Z}B{(Ef|h{e3)=Twz2>F!IYWkJ8chJ`v{3F{WSfqmuz7dt)gSBp>JjmYv>KQ&yB +zQwDUw;Eb-YH}*7I*WBp0edynBzMwg$^=^D_qt$=by}=HZo`9{8I>1k&LDlAe=#p%e +z#_#!|Z4WIJ +zDPS|;HVooSpnP;8dkZ4^;E+hq2su~s8ZHgOg-3YN*g~+58Au;P9CSf#CV<%6HB5ZL +zcECu4RB2I4d_PXV!KQ)5wEN?98e0zON?kS)ff;0wuyEyh8VEyk^JI?uk#=F}eOhu> +zM7lFtZkHKVV=%&+h;l;%OE!#2A^qrt%70}`=9qtDO9GM`@O7R;LAFBn089W)w=_dQ +z1c>02KU=eq{n>@S4Z)=IX|e(ao5;{GkB#NipxR`0VD>}S +zZLxYEfd3fk$OIMdL_2_YEYMOgw4dWt&``qm1$J3$Vgj3iJw!w!(wbV>`$!SA0=gbQ 
+zZO#;4h@pLONz_`1Yw?Hmdqb`Mbi_4hqnhAx6-3l`V)CQ$G6MM8GH)|(A +zyP2ZF)E3KF3WsIDSFG{phFOs>W0R-hkW(2fTOxz5P4)*M6Cnn$jYbf>FD}Vn{gYFo +z)2`Gce;!C<&9ji06l?#agKZ?I1%C1%`BWWywr}uMQ9_LLfmz|LcZ1@|MPeP!7Sd=jF(QzL2*Fu4$+hf +zGN)U3tOD?ffOO46>FV=0#=ObOt3Zn6a3T#J&y5F`fW))N*OQ#lw%9hjpua8a30p9R +z$W}+((3((rR7f=oxUOLxwro|bjx-jGAyrv{K7}T(dqL(5T|Znm0OE +ziKYP#T{N>_X2rMTLke5hHqJ<;mP?sSH;;*3AP>P +zTzJ9ZPKBG{5CC`5qTSw#xTS;Vyg>5ftrJ)Q*ORR?CV|3vCgn7lpB2cuUCRWcii-ST +z2BhSMMYcLn`T=RITBW$lv*06KDKC@KAdUVGSBe(W(@1ooB)8ErC)A497RnHjb1RBC +z(m<9$U_o_lc+Fvf-C&vfgi0Yz;lQH$qB(*nE%NohUN&@H6G_z$WW8Ax`!1%SyokOA +zEwm<*9L-B3FnQ+=7djWv+sHtC3#m!FxwR)7N`toqa(7I!&IYnppqRe}`JcV6-_*G| +zG?eLVonfw<5F!iYsr_Ih>ms62%-GuSdsaLPE;2m$1_7LT1P6J%>dx{*oQFG#uE)e# +z*w72iV|hi?MOvu}Ug)_h_Mz5IcroL-4rP3maN~5|yxCpsEE2#@FW;%3A404UFuZ*7 +zW(ozZI`>lNZhdaazxp(F7p0<+SDxgWEjWD7z_nDTKqUkC%M|Wf`H0^wB7xbfoS`My +zyBAwACN3`LG2LQFa>E?^b2+IPgffMm6HBhwiSEVMmYb&aYTqp4)e^j`8wbQ^$PD6P +zPTL>g6lnf07b*J(d)v9}Yqe#!imNF<%KEhK{GQ`Y7jhzX>%Ll-A4A<)R>v%sQOItd +z6s6tVF7R$yb*G@M9^F-7U`|!wO?vLrb +zGsVS|JJQG9=;?gcY9!+PAIOwhd^c%cJUviK^yZ~xJRB_&np)jjORCyh)Uf4ggSUH5 +z(;>-|P)UqVWDAmswkcGysCtb=G)|X0mPgsq(!c5|3JzCB?GS-EVz~k_vQ=CK{HSf1 +z$xj*yQ8nR1aV?20ZM(k7M`*W->P)SRwS;b`Kt2{nTA0bT+C1E^jquKH=>8GjRLkDn +zeiz&-?q9fDW7_L09qmH_6Ui+$Y^61s +zfh-y>F7GX-@~Pzmt5b%2;%bhF>ny1CiVrf6JZPYj*IH*kqLNuZ>sy4d9bAPQv&fAC +zx#5AZUF(D8{E%7hQDy?vE_evm6uJ%v-sTdb;Q~|g>(m3>B*C?rpi7RSAzs4EDof85 +zm)6${FJgmte=ovxFqU%iSxPL*8^=o +zJeLnv{?UI?wk&vG&za*@(4gP|^1*;~`2o7gK(h0|weti1A+V(w_xZD-{R6GD1K}ov +zjGcqbvx6L^K}GCf?9sD9z)_hbAgAige|%E;{{>>S^}kPyX7yivwtMyU=&SExuja46 +zTC9Kd^X04Gzg~e1q>!yr*fHsUBBk#C9g&jI$GE*I!8W3HyQ4g=B-dGG8E2o_;Ws6M +zR=5>xz56;ne6RxoZmY9Q?)}v-EFIftq88*$jbKL<`q>Fb^c+$4L{sjgR3xun4aB-( +z)`W5-Olgxy8Roi)>#!!KHBb}zhNDi?x8kZpbT91xOjA%I;@~ju)#){A+#@Gl?4CFr +zP_dDkopeECW|BZ~xu0Am*femLCLVpS5RI6&P=)2bTeaQZDvl~}HrniDM2CL=3Z}!G +zj0__mXi%o@RWUSm(H;e66Gzi_Klm~7`Bu*;#7bhfq*=3i<~%fO=F_9cKpHaIAjDV= +zhT-|&T%~pe=3esDX*?E?b#V4ycELp(oph~?HIqZroLFD3?zFO?xYMfL +z5C^gGFew<5udceHOxHqsm)m$O7t!OT4Jw^?ln?NO@4KgN`UINoQ2%HT#suNQ+~2{* +zXz(G&QHhE#Y}U^GThN#@@|}7DOfB_J*IwO>Q_E%jO+-nc87;ZZV!P*(g+%ACVv7e-85FN_xC}>nm`c2Soj-U +zt{Y;EvwY$rOVh^%94R%^PCgCYY)1GX3-&Iz8WG6jXARx_u`p#lE@uscfZFBhuri?; +zWhyq*sDE@vmh?2na8L7=An~IZl0Xg93c@%VC15pZ)HBZptPRkSh9#=Tn-GyEQcTOm +z6AY#E;uWql!O@of<#8*yG#xXnYa-4F_S4&C`IqM)xUoZg{M%M}9EfGQ>+&@fzMR>H +zwj|Ol0nQ$IJzF4>-+Q?kg+zl9Px0F*kOcK3v^%@g1!_^Q@UVG&9Y2#n7P8 +z?b0{Kx}GqX=G^NVRv`IwoUFm^vpZ}*8nV^Y0b=3`Qmbbgsr7@`2MvN012Gd$&EA|V +z>APVr>Iab}pFsOKM&JargQfARBu!t5{IYxc^;r36he2PY+QfE5q==;=C0sR*Z-o?z +zn5hoC($TKk#^E5=$|E0A +zL34)7@)SAV!MeGi)3IIDXspjlw7vEPPo!nGugU=_T_1u@kfW+r7`D-2{S6!9Ans?f +zMDQbpo(!xE*MI^KO7`t>SF`hMP@oO>>!P00VNOPfssqh-gXKP>nNGSF-a5Kibt||$ +z!@@&6o$$&8$aqyVEF>6q{$g&FBePK-{B`X=QNai=5Vn3Q{tos3zS8yoDN?F#f47$M +zx6%bHIxiumZ~vMX@&wDYgA+DpYyZ6{BX(A1&$W+l-6%znlMq-My~V +z;cvBiv4?r#YM!A|fZN8!uGC$3Z!5)5dMJ?lvcG>7uH4De@1`rx2<#8Ia>2`+1t|Rj +z(Atlpn8U-1e`@a`9RN~V9H`$4)d|`;b5i$5e-{W?bUsf5h|ys8mhYO*4Ksl<9ZXN> +z_f=&e#dDOpAwqY^-Akp@7dQG0zBaJbX%D*q#jSYR@&JM!?3l3!K?i0{wp35n&uoly +z82NW}=_TvGrAq=@j2BgHgEtIiA_0)n&o|hA@Th$IOR-r$-Qu_Ugd-10^WDM5q^ZRo;9IUWx`9f#tZw;XYg23%2 +zH#0!s-_rZzUR}iQ-ML>ymiN^mx#a#k20FoTu0Q60+Tkaz4xB6iTMF!dxtD%k-$QN_PA4!S7y7q8(4Byh$B>`8|cLZ`hLRYF=Mje$n6TS8Hq-ZX+gyQP|SqLrN!in@$e#N*{?^Cah4 +zrp%OOjfp%c6LYjDST*DH88-8{Yjn|Hl0*o107w#uu@OV@@Q;QHI5M_eycxX=&U&U5 +zoj$>y8(^i+hsT;lr1LptW>ldI>%cr&jt>Vu_VUI +z2E$v0Nln%4V`j0#LX}{U(lI=T<(>RB6g7f~XptdEOJu402ypJTc^0+-pZHHff_ii^ +zj~DTvAYsm%1~l24C+Lu1n*I=3fx{T}j~+urzw>5|&$EwV5KM1c);R0aMB*1eSf@8s 
+z6|DiRNoz`@Zo5zqv@zPMIUQcS!huwoFmcKZzBeN!VM0Yw%-L7K6N2Jow^J=7`ll6XGvhQLh!GwGAW;!(*phnR_RB31)EL30meM +zmP-u>;hFewg082X=FZ`_i8;5CiJ=P-ssWT0!CcTAAaxcsf#WsQWRkoyR%yrXb&EZK +z$TILwWw<4!1tcAA%3@%SjQDZ82WR>`fUopF(ZX;|seMO_d2%Fr2KZ*5qNoFlXOrl|aFC05!Jdx^h8; +zRt!Vu1qmRg|CJ>5^HWCyqD^bV8WAb!-Z}WTo9V86R}!gflJjAbR+N$V+)A(uo&RYd +zFTX9HQ!RMrmdwWgX~C;Pi?L9?sjwKC<6Q(!B3=JAA2E*PGNaReBDvABg&WE9d~yJc +zj$=<*<((nZ`wp?Jizr*H^B?;OmoO6cHg8<$AIM}BCH(Z}9Nv+IpfL_)X2oI@<7*P> +zLq*Ot`BB=pbcuz9Luo=(-u)M(kQgC$)y;ffVFN1t9zH$oP&yAOWcwBULKfIe30Q$S +zs^pYC{Ipr(?Po5yi?eJYr1& +z$ox*DX%1*1Z_X>bY*i`5H`k;n>t0cj#^JIJI+1!)xrFlU@a}93;!X)hFbmJviO(n} +z(cKQGY%O48hVKOEq}MK%%#+zpsEB7zV)|>TB#_9YkU1;!%PW5pK@>%Qom9_75+U51 +zi80X|t&e)FsVfwssPfscnB!I +zmmP8~d#06Kg2VyT@ +zKeW<%t30(pk#J6QaEgBET(LX9$Y|l_OGHigq&Tkd=9P0X%iY7)#fAl%%LQdb0cH{~ +z7rnLU-mHtLxUEz(voZlw*=SiwcFUpHl*W*6&5K0|oT{gx1tu8JvPrS?;`P1A`z8LBQMILVsE8*zk@C^p +ze@RHve3OOhRW^CfxO)S!dA|zlMFZ92)3w7|_Z$vqwLHCJ5>O}01WuLfPRC@NoqG6F +znEsH9SiVCea1a`ZFrXpFX*RSu}GG@<~ie +zu21edJ+_^{$bB^(xPi@Xy-(&ex|(=`*W^}pB;l_v`0bef(H +zJSg#Q;#W7$Wi{OwH!ZwqiX}BZuD)dkQn2*}`{OmwP4`p0%Zwx;70905$ +zEUW?n1w4;OFD8n|Vjd$WS^{L#S$u_vJp#$hp~kT%ErTvCUEIiv9vWeQdo+9hYJ2W4 +z(^f72vSMV}Q_Gs7+B!A2LZU~j)c;AfdmB_-w|%(v<UlfulxzA +zf?Q}Y(o!?<@mU**d`JjPaM1te7XL#Z{CrCNj@J7fkN2Z3>rdFy@6`c18`kfC)bE_W +z-}$3{XMXix+Ua+tzW;cKe1245QPkP+UjzSZNk}^X9i-%-_MZbp|Jw}^bsp-1yq2OR +z@IWee*A4$YA_AV-e1fgxBUNuS-~~(M)j3g{m2ITbV2-{mY!4Juf8=!()bl47Vtmbi +zubfiGS=ku}`7*Q5bbYI5p4-(;T?LLSw~t64S+6YV1HsVPF}nGI)97>iHCj{bVT|_@ +zs7iv<<;IoP{Li!^qooR1i^Iu}HQY_Uj)+{_x@Qnw-;=A=ZGAnPRFUGIQl+)n9?8SU +zCaWQ}u&h|7LJ(){qo;`^woS)_U>#_T45m#%*WNGh6t^fJnQf6|oF( +zcl51;wJ?pw7?3@5C0Bvof*n7CgQ?;6>~6C?u|YxiL8e*|3P4va<*poBWv6Gvh(yiD +z0`zrP$k*2_gd0WF*|CSM9|Dy#R_lCjJ=qY|zB~3qRj1MjVOO31n@uo`3Ssv9sHde& +z+PDS$EWQu*RzsxxiW8v-fF3G#e&^@?w-djS$n=R)7l#|Z>NQYLpHj#E7 +zH0H*S=C0Tp2lw<9HB4>_Q*AR*x|=g^t+I)Z{IIdXvk5Ag?zDulM7i+U4QGPg!Ku0r +ztJ`@64OEXWuA_wEUdLaxx>i4__3j$Du_%P=RoQnv_gUZ0`E`#jMQTkJuM2^CO8b<4 +zu3g{jWLQ#p{26wZVP_T@v8Ly-g#p(F?5u=5CWvfAi<2*Nn>3WJ3OchxkgGj|SH3|< +znkWk!lpF{eR+tv}fkvi#doDAq*a@9|&mr}0uGTKbiOwCq3@j}#$_y#lBAZXL*H;VD +z-xdrrq!HD;+HWG(ZF=3gw>$RT`f_8Cgs}3a>GrFJV5FL8RMX9Y4zxI8kJ|F!S3Tv_ +z8<`rKQP9Du@QPYc)xRo5tk9w^n?Xpz$XIVS1G`h}mrp>|8chx$~K;HBkg}?=26w +z;2Wu1NlGFk8kGD!@(}&y%vE|Z*;?g}ko`5TgD;JEvv*r{3P3W+ff#{uSTIcabAYNI +z{xx`11FLqnO8J!jQ-y88G&SU5)lgp$`FqOHW7}?;rN-<3!rpsDHNA&hyCDfAgx;%4 +zl~7c~LJ>6}9Yqk8t_Vt3QGtXWLLi}c2%-1Tdj}0j2^|X^6>L-m3mwe&XRWpOUi*x5 +zbFR)Fdwkq8xM4tk&-=_dnKs=)Fm4p1;$SCxVcwz^J2t4#g596CJ0WTSVh~Wz1{n%2 +zak+3pk5Y2jECe-Gx{7Xu0m750-uTQ}OK@~JmF4`o5Oyhdu$+nn{1);iNWwUp(XE0a +zxPt<>#a_j!KFm4$$q5l)8w|d>0lh}jmcUc-=aFD0p=<_b>T(?DZ6~8pq6Oq@Ro#6J +zUW7!u)+s6{-pZ{5K@ihYp3KpF9<3#Eu8E+9LVvn0=GKcx5lO`V~HuyXgB_d=_Rq>WjV4DnLcBOVP +zy*9DWOevum7yvty +z^1=-;kW}e|F41SmAFeJa4eShCR@7$BM>?vmH4ocbOs&6vS@_8wn0dp*VMNyx3(P7uYYzQk*%e?y +ztPU`c&QpO6`ACaB@;HiCUu6cUMEkoFX0|<5DdnzbMQ0`o0+?z;4wfCS`0~CP-l#}5 +zadC~=MRW|GuK==ek9@7(_VaI*kIb!_e_#;cTSWb}8^8L`Zu~mzGJK8)3kXI`M(eoi +znoOrq9Z>$^_0S9Z^Q^iZXNihoIII1SOfsENUiKf+S44r1BaQCUMo)3qad2n)TvM;v +zbNAR6C`Kiucc1)ocv7w37ogdQ`oQSv;fB02M@y&d@U!?V^+jcLGppSlkBIAq*JQR|yI%5)S@2un|2G(-?j45ORu4mMe}kdPgYTFZzT8IhG0*o7*03CH +z9}a7!v0`h)>un$u6=%9`vmPtZ_Nm$((`^;-11CuRBssG4{K9j_6b}ERXRCIGpZ#Sk +zEuQ)S?oE6C3ooGc#ViZWsnxMj8PoR30=3r_Ol2mI94sY_`gXs}`P~UqUk=V;89;Z4 +zJfY+HM~{MXCH@Yzf$sRV%7_c~5&Y<@tug2s5r&=lt={jB9}YgiE+6#ET-x~&;I}P* +zm%W2Te^-R{-7EcY3{DN`2FT55;AW%mt|Qur6TX-d0CYw(JE1og0y72RMx*}rjfcL` +zz=MC0p;7&&Bv7D*yOK_jLoxl|w)z?1@EuB^sfVjb}Cw +zHid`mZuy%!1&JBsKKEjn;DLSwMkgd(cQG)F5PFpo>PlhYLV_M_42PNY6{7}*!hQds@V{VSoyoAOh0t9A)~BF=NF?ExOh}|TCc4R|p^?CjL6~qx 
+zl*+(aRYJ0hg7Yyx+k}Xe#;_L@Ul1~~45*|2SE3E87#L+18IML}lL%O+sQ8J9qDj0d +zk;t_et`3h#Rl%Cf;q+*}kGBKVk%7!j!MMd>z*>#OL@t>7$t-dl`bLTu048?m8yTY^ +z=24MO5i}?7)ZVBdG-90+n6DgCED#hy3;rVpcOo(_BZ)fH=v^2gQZTN3F}B~4pg@e( +zBSn1{zz6&lYP-wGdl+g%;TGw75%Y}!3UI&UJ7~^SB2OQ2n-IE53)aDcxQL)5-dO#n +z*lM +z6hkH#lZeXUZB6(&8u3K+ +z#?b$*8IAhF=c0{vGCmijVpEC1-q?`h6z`Pi6pOT9jo1=m`qQ1PUz6!?k?G9~7?X9v +zcW>-_5`oC5nK)|#%r)NDMsZi1XN6A|0*Km%;OZ`$uLqP(giKNOg3H5 +z_mautg|h`&#G7KW2+rBAlSz*;={O?Ubt*Ad`M=1}m6C`Q{laeL6hHHn;iZB?Wp7|( +z>s?>4&a{7sV6>PV39}p~MJ62YC+-Vn+0k7BN2$3&a%maF!b3R3nV-{$Ep*QNu}D^Q +zNel7uc_kD+Gnv9g%!kp)yD1qsOr*9`VLdzxyPKj;Du5u1n+b(3mN^10-eTFgX{ym+ +zm#k376s6t#-F4q6%ZR7bz8=e1FSWq3^V7N-+IwILWv)xU2cOl4o`sARhVo*u&OUt5eM9IS?jxz=&DlG|9a)}j7@wD%rKDmhpV_I(ZxZa=kKbCxYLv!Qc!yRqm$Jj- +ze>oPcEZ6bK2XGk%xRV1U=SZ>5Jm+?xi?LxgtqD-yShT!%mR^CFaY-blyxXivcDw%d +zbc3p4!2Q+)zP%`jVb&Fq!rx?HJ6Xp2La`8wSnkDQ6T^V{;$~*K43>Vvkb&=rOA~ZD +z;0+=pKrgR4EnpPXTvx;)9NQ3TRYxYZvTujo>yNr1oN$JjcN7@~uBv7?Cvj1-WTq3^ +zTN7$qed^P2S#(5Z*j{Yjbk@v;^migYAAEyfg||MmDsR`V>6?h5oAV}bLnC5Y(H7B- +zX$cQ^;v(f=z(~(iblc})Z1V#xpGy67Mk^1ztLK5t=1P0XKvTIqxiXD#RV2V(KS%3F +z$VCvFh11LywOq=FXHXx1`-kM)qdp#*hSX1y^@?c-Yreq7~lYcptkztDOXKcH18o^ +ztjhMoEc;6Zy6#xjesAp$Z0dD|4TX~*WPu)AB+L4X?+h8V_y*M$w!@P=k}rxKs4;u{%0r +z4i00cQDgV{$1I~*t>2BYu8)}=8Mo6J_jVY+2SnJeIo%r?zkT57#_61J+xdpq#G!lC +zKH>Ln;(r`2;`}!*Vw3v6#YX?ToT5}t#^aHa*VR_5J{dvhW3y$h3R(%bH8#;Cf;V42M3_u-db?KbZLLDjMDty;k+4v+@x!_a%2BuWcEd@_v=R_#G6u +z62_Tg+$p-!wRe(x#X$iqC5mR%K(sIBtQkL)iUps4S;exZCatCZq?^sU?Md(13AblM +zocEt~!X#PhdzfR(-kj`NdGGf7l^%NVyR_GD(L{-x`5=wgH_iNO;Vagi_&2CSwqj~^ +z!fOnOmIVo0)3u0_Y50qqGrB2M4fc)vcRa}G*}3zxesAd3IHcyNt@3rQSoTLmol?feljJp0NbyE2pm4??V;Pt9h6b&Fg+550U=1ODq=Tz06)E5pa< +zxKCm`;G5@8i)(DXH;uvz)FU-7UGtK{2JGrZ*s}4twp}(AkwGb*IoJs9V*Z$YIkhWC +zU>pSoB{Wn(>vFKCRiS*;2I_7Y3hbrG3$@|K>eZFwbnR0_NVYh(7Cg6Ylg4)xXRV|Wlx$l1QleO+C>v)LRof_J6#?O@DRrSWX; +z4A0N&bIV)X3HkvGFN~H>40qql?G$vZ$++75DtxsNl-EnQZ +z2KW7GurRygU<`0SyV8}zzOdQk;05KaZPGYvfy8kGqY?Ub$8{w*@i6u3_@m~Ue3Rlv +z(QY8fc@>!>7w52I#JmiVJ`hv3BAsB6Jj8oz`^cr!o$RdGiuhQh27^mgTaaCTWK(H& +z&*ZtYa152HHXDECv_#Aa1@R<1Q=zuc-8ISW3J#o9L8-F&5MDPs)+BeW=jUDGAfHh@ +ziDg3Cx(27hwB3%Fx)!tSx(CB1v*E^yA&}&3W_GoXuFE$2~EKLnf=JGF&D)7mI{@x)8Mdxqd=kA +zY-UwwO8LZt7`d}=9;vzoW>#Skp>T#-Vmr8^^`IOL)G1L)2m3?f6z4gE&Caziq$VlX{rNuM!ynA2EU#x1!8Is4^bueFexmZ31j#=qu9n9?U}wasjW($9Im?#zy9gAvzCssBmQ7BN17xUzaK!cO +ze#>~qpR?8X{F!cuicIC1Z&n1oT5@#VMNnhU*9I9sbp7DNmp|H2I%2zeYxcj$5buQS +zS1}Z;-w)RRAVZr#vC*CtQv#*~dI*U`_U9ONJD@}{NFV@-5F{wq@oaazmFPSY)xjbR +zKq6ghMrX=)x9+9&$Wxk(qV%n%mohn6ZDRUoNq{K*F8@h@SkEaEnmdK={300o37;C- +z={WNT%$VP{u$Kv?1)uZ?2FjC=x;`%+h(S`64?Zf%fCp}Yh(ho==DCRv2}LA2;JNWWYcJ^@3)4xp06D6LO7xDWuxtuU=Yh6YU>AqwJpT@fp&^Lx@EK8!|Jfke1dopRDLy_84koBB2jz=u;;TH +ziXrnnm)9`5So&sTOvqH=i?wLSj&0A8yJlS4vps+g@j0%Ln1A=nlUrGggKnR4R@TW-_CS+hoHvXYX +zF#9C9Pmy519cbbny0L&kQ8*+Pp>F25J}myH4B_G=etW|I8BnW4M}^{*AwU_5%$(&v +zcXana08>eC2glO9af(D~C*sPM_cNfkdL8e#7+@+G9*@Sz5kq5O5%G<_CdiONl>o|Q +zaIbQ}t|G8JhCl%o(%6uH3XiCac7nK1jf7^TPnS~!UK#%e>C;N_Z#T!4V6g8}q9OzW +z#+@)kndqoq+{wuRX~DpsDbPM_^dj01s}hh&4Aq$n`~-so3)mumZ_xw&GN=#0aW+MGkXpAj4$r+63-r3XC`z?Kv5~uodp37ylt8;ENu5 +zhj7?@oeV!DL>Y?U81on^9gRVz#>Y0suBu>1=b~2}gPpb`Brq}l7{snhKprg)x*fU# +zi#OUqKUIv6Lc+HRvGS7%D+C`5urkfba9Dt?7#jnRlvPQZTZmiP!nvekG*tt%rU)v4 +z6t)zOXb!+_Cl{^9Ru*w^O%cMk1NnEtK5hlV6(jWflBbn%7-cYE5WSk<`8gU{Toqp> +zVe}oD3GO3SVnV0G8*Nrby-}vW^qfdWyuVN(k8T30evdn!{(%QUt?NpG_n( +zPo=6BCrJV<&G;HH$956Rn7SS)k{TOX?0-X*AZ(kqf<~Jar#+F0NJ~xmv_-xsm@=)L +z@;!xBhdxC@Tf`d_$Aw!2j?SeaN#qm)Y;+W|1p=1q7;YnRZ9+ggF|AM_o?ABKlNo6n +zlORDN`FZ=gZ>N4ECOq{fTvQ2?NzGVZNbu{+N=HZg{v{Ok145B8Ll!nGpBT#D@150{ 
+z8r&QJG%f46Co_^WD<;#aDOf-K;GeJ%j_@QOrvNMGjKr-3|HX_-LWb|Zuo0jGRWRpQ +znCB+IGNn~BRV^}QEuwy=WJ%L9sS8>Dg5ai}60{bU-7dcx2I +zf>RfgN0+cS$?={-Im}aJRSWM?P#U{R+O6rL3lwt60^yPz$l4&69Ro~?vEC|9PcSHc +zir~F~Wf7OdpG9Qks%A8K1K$mExo^PsBrIH(zi~>Y~v;`%ks6=+B +zd|J8GzbHpkF5%)FY;iQpFO|YqTv{D+w1OaAM +z%NLIbRU-TT4i7!AmdGvdBPT-q(~s^Dg3N#BxzC(?{sIA7>K|xHHV`Uh8YtIa%zm#| +z$2CJ#kH9_Nt*y7rbBn;4asoNd>dFPEbSoxSHdW(v!gVaWzC2oHIl@OCZ|{n?6eiA- +z<3WZP@Ic_bl_EcjhW?$V`y$OAuwrTRv^kafNJrd5?|N-Du&r#e#^0P6g!YsvFGwZq +zw3P9x5k`R;Y%6loh5U%UlCGuzh0^ANzGjccRvmatV`|1!iU0KhjP7pyX=dI*3S$ +z-A>Lg^@Y0AmAkaVVL&OKB`rkzwEsB}tE3{Bd8+NAVVjOq+cOxfh)h(G52PqZK2Gz! +zG*j>_EgVFjZV<4bRiYcD!+H`3ZQf?ROzuJMZoH1lkB7~Bg~$nx~d%x8UH4*V`oR^8`(K#jn5Qh +zJok@7v_Kwa=qDm*4?VysntOUxVCcGl=V04orFF27z|fUO_kr1=Vb-CKs~#d|!#5m< +zCew$RVVh((Igo{Y|>y)prPMmO=r8B)L +zHGX|2>b0oND}{_Vr^;U|eRKKSBU%}Mw8qb8IDQ}Sh@OTz;&dMP>Nq{(AG>jF?2f~j +zS=5*n)0pK&hrc1wZT@j9opBR~aZ3Oam5)1K8+YtsA{IJDCg1n+{~rvA*#8BIQ2*B> +z`rm|zJ}_`1#7VzZp^}ME17=AEUYA7y;&C6Mpk3K3%F$z8T+aCqZ@LuodD*(a7_#S5 +z--?S%X&Oo7s^MaaxAT{Y#f9g&aHUl^y7)@j%v+b0qj8^0Zyb{jt$oK&x(z%;gyhF+f^S286ZvkUK58&a4^|Ji +zuJ`Ocg8UwLecaCZxpeLMu0)U)M9&&r)~sF66~@Ae%3bedRbE|x$$1$7Ltq~3buPGW +z*^eI4D-z)IDT%H>jE`w`Uhh0oR8|9*&=dPf)9|nA?US1rJUt-(QsHLb*_qYat`jpk +z&j!!r?f-g(2weR+iu&Pp_VwjY`$})r`)1F)HEC2o2Z%$}Lmb!bZEE3qdSYiM)WaBh +z*w~0i&T&6d4Dy<;ulm+7a=-WDmXY0C`n%0{_7QVEQ@)?xHon7FvNq0!he9}&yJvq;zmQxNz|cqLRn6Bj0+0H96;%w_y;>anyrql$g1vR2X5_u% +zlql&@>s1SEPRn=Bn2N2n)3V&1>s=Sr)nG1S$4o)dlT}8)xUYyA{C@pHt9@hgLgmG+ +zca6UfHs;S<-Q4`r>JhNJ9P=T-F#IgTrG0(Ljz9bI?J+NUIY+hFw;Q<)V)NjOFF!1L +zh(W<6cqRS0EZ;+yWfcqQ(wB9TYbjw{OPis!rsqZ93$dZ)a1 +zo*a1w&lj80rfK?Qvln9Wj-25R7bZy@{V0cLIj-M#X+4jFvv<@-)gS8xQ>8Q)y +zD7&FFzytsdiF3J87FxLiZnhAC2V=~9b!#BFi+E`{6mB^GbzNHkWQ +zQm}C_Lj|`ai*XmMP>7&X)?qR^sF}-I0KXz+U>uyQmDLj#W{mxqzJS+&jten7O#GC( +z9$BQmpg-a>{VQ{Ou;|iOAAKZX_g5B(je>$1jN*iUXTz^iGzI!cL)Cuga7IzIWDH(~ +zTmH@!9HQtb_rHqv`3<~}i}iF3UdNMv=OeEb8<_RKPHy>Kpcqwr)zRP$dHQ#u%22Tp +zy8lh)?(ZTLTZu8j;BBt(21V~$$&Hl$w}omO#l}%3H;W9$N-Q@@%!f*DH};QJ_-vHg +zvXz?m8jRPFH_Du@m0C{pk2kbzl)FcjS}zz(v`pX9I%^9PTC9ScR|E-R*~)BThUWJb +z0S2O1W+yN(*`w4@6?xjx>r-2QOYiw?|sou&N8tQWBA^-^fGD*WRiBGUfOSFz3tQQ2x9jjHg-@nyOp +z1$ad5oybDKBf2CYILTJ|q!PrsMpkZd1mQ5Oj@Z8$gNC +z0fx-dnz{}4y}N`cY_aviA^t@Vb8z74bf7e~t(oyf1jz +z{*0`Nd!hglYRm33FZ^+6(#Rf5gPi4tpvBl5O%E<>GPky)Z& +z&Znt#pCZ<#+s}Ty`QK@W@YhZ_e-um_Dj^6a*5%7wTQayl56}*v`+8`}$aVLxRFvGc +zWrM?16t%9%?3=Uk^_;g=nI4MZ92cPb`i(oWzJf6pYW!h$q6m%`(`k1#t)))9XgEj7 +zL#^6f7lz^lQRNBk%cgbIse*B8UGcjmQzy_&{-chD=Jut-%4e~EXa`&kv|;`??NFSC +z7qjDes0XX|F=?~K?B`xcvwgb>X`2~8+LZrGj1UN!ok65{RC2uk_Dor17TAEVkf;d4vegC!GTGM4tV`tQ$E#EmYEOWdanB_ML!9#^S +zKVV?<51yS8TSpIGzXx^DRi9>GI8Ukc|CxBI{fmqMdXVjsTL{28KQuYZSHK0e?* +zepyvCclg`=#l0{0id5cIqg?zk5YFq1L5yCHcoFH@urk4pQB(i#M45}T1 +z@(w-&EAdLhCt-TJqF)WOc3wpscy?Xh=RG#jOVu#m?2*~yZNA;Q6m+mjl(+dp;`c0b +zoFg1f$AWc!Xt3prZC~fWe!bI+>$u0^Q{t +z7lI2Lg1WYzuweX{NnTuyUXG4fh=9K+1(&PrPGJhoIvzS%_}3cy-(+JHF1hYpM4$6O +zY=vmTy&oXG94YS3aNjdULAz)#V~RV7=ywiIJVGD{60mkf*gK2(uoU;Q}hhpl?yMX{{UiBF0Fs3FFI0-%O1)N(R7qBr6h+}leq(p?#2*Bj!d=UntPn!U;Y +zU86^e3UbP>#zT7Jrp&x0Rh_$lpUwPeWQ%aBFNQ%sL2oPM*f;{HbEdho +zBi#{-%#W00!)27C!W^G)nZ=4T6S65Wc8#$jLa~MLM+zV|N5>d00th3EHtu_zghUI^ +zJ-tJV_#0GNCs>S?g!e$6Zi +z48n(ayZ5=1=8{QaDe-ZO(J@m{2WO@TP+HviV6$El2~I9bNfLy4y`K!6AwKMKCg)+i +zo`wZxY=_=gPU9#_nLM6kx8?Lr#C>nq +z)kzXVU&b(hPS}>yx_8pXWJoV9Jj^j|I2pFw7ffDq+t!cTRdr`r=3r-zLzzSX%iOO8 +zW5-n8%lm?3oG?bkvFo&yV?N;*)cp0P6AgCVuP(b?m-FNy6CMHzi$T_r7H3IRG!T*s +zH)qHKJ!0$+4q5Ot;VimAmm!}d=RSqyL&Nhzl?$W#6Nj1bv1Ckqi>qc!W(O!MS(yA7 +zk>S5w1c1ESZ65P&=;C^MK{EMMO3<~*bT8+Ev|V>diz`9S^PW$XcYk1G|Kn37xsUaW 
+z%Y@3xkH;_VJl0VJ-*6}XW5JCwww^BtTi}2+r$@?p{<3g0axV$D2*wtN)=kIvPrJPA +zkLs2~x0blJ8$5)SP*(b#`StPaTlWC8d0UY8XAxtTZF;^#{!TJ<>=mCifL6!zCNs_uEKlnqG~_jfSpBe9;T|SF5mr6-wWgwvP})>g((59*9@~iKy*L4p +zTVeCZVBS-p_e))CcJiDJVz|k9ZfXtdvSIC{LbPDXx2g03ay`z9KrD&|0EbU6+RN~9 +zsUid`2J#yS`WF6ht-m<3lq^QAY2j~qyq%@d)bwP)Z>c1&VYwyW+sm58m9fQ%DfSsT +zf|pQu54;EbEeK&&X&JuV($bP!yPH}ck=@)M{6pBaW;!)=AZk#yWxcrVwQpJ4?Y0Pd +z593-xiDA$^;Z4@E(K^X0$5XfMZ5$k>L+|rsJFXhqyKX*TH@q(ga@-hj6c2r|BM;Hy +zynitBoQL{g$IuRtVt!dszqP^Lyq +zT^IadOtxLR`_@j`T^IIQjrO~kPzVk6t{z5cuxXn(GHf4+5ppG`ThV^bg)x>u*-U|$8V69KG@$jI5;~vygxW93e-vuy|o@1 +z_X7yf5U^i1J3I7#e`rp0_@nypXY1jwe#49D!^>^MtFy!3_lMU-M}DdUMMWc$_3*gV8I0^9@D_hz?Q!0bjovelvjJ +zbP%0cRIe(#BN)yjXHBchc}d`uG)UL0&h3s6JmWG;tIq3<5xWvDs$Y}emw+@avF@uW +z7)VjH?@!mSEgVWydA2;;S6eiag$hO-F{q=w%F|1bv+1uZep6(eXOLk~Uouu|UhDF% +zzrJ*$()MNe5yOVEsamHuB{l;MI_%Sw6E*YHv}C$ZC|RJ&ccw4#gORY9t47vWY_)$g-fJ9>O?rACu^MWJqz +zTmD4-6^pcQjm?h+9vZ*Lxt=Wl7`h;q^kG(^kE!R$9-N$ulAZMY$C_WsX7 +z<+c4^qq=vg(yF|bCLnQWP{-yhdhK9qj&SM{Q`qFagPp~q%0IiSjcb4Q)_YIU_kT}3 +zrXOrCRMP+KZ>`bkAl7UUOpgi{UdO_(W`hN2R3^3cAkN5aC}4^pEZ1>@gV{{Vv<`Nk +z^b(@6UJq3n%wb2ro>Ahy-#+ +z!V|Qf^DVEg{pXu|E*hg1dUT7{duf+Th*brA6sAwp6pm*Iyn0`P(VM1o^hR9l=R8Rn +z3!0u~d_0L&O5#K@?W#k3LhNrTX^km;6QlUVz_ +z{i}9&<`GVd1xhJ>gATw)kU?bO*#hqnle53blb^o`VNVY|Li|EdX@x2&-k~O#H)&?( +z-Di6GhVN$bWc1xHQppk;Lf_zl{}f#i!p72#eEAcEcG6{p_w|RKLF-cT6|V^Tnf%p$ +zD@FXfpTt}ffuX*Ho^LVXy&4QpFvkAHH{v~j_7M<5<&baG6(CUzTvrWD$+>6QHYF@$yiK +zepvXM3JE^O2;lhGt@c`-0DR*ChwmH(Pn!m6eNDU_RuEWSrHbPhVq +z1Jmt5Mx5CDG63JW%@0*j>6R59m>TwJs3k{%;R=S9uMu{jw1YC2TfX7^iJSFFNmpFV +z3}<@inVRh0*z!kK*;CWIo7FhM@<;kCV1?h~jdf!REcgr7{mT%j5xmPSZeUvK>{h+I +z9+PK+Ji*p_qDG9=>Cred8Ts)Zv}P1+3o(uGM)aH?$A4vVZvsoTuQEwz1Z;R^uellmS$bK7uaPiopr)Ng+rLK87n#K@?LZL +z*o)z+C*2S4+FRW|=3Rl~2(Jke6uV7>Z~lz9d~IPIr!uw5bfR`fbe&JPb)eMq6D?uxxw>Q*z`c6m4Pt9pt{(0=4pq_{sqK{55i5t;g$UUhfo59)oR +zqzefA@%=R+aBr>fPlxE&_D7DKPrX6X;ERf#^s$E={vYog8=oF| +zJe!ctI_GGyE!ESCk6PQ{k2Qui9 +zfQN?{zT68}(ug0b$HX!{;ZC;US+I-Og_gTRQS^B@Q*5Zg_`^+^VDsBg)@AIUO}d=W +zbGr--N%G@Xb)O9-hs`irt2u^WXRy(42e-X4wD&=BsYYyFu4oS$KWc9Jadjv1Ka4A1yM(ngUTD88R!wN>Dh_z +z*hntjGezD*3I(nsZI2Z@8+zMZOO1K4Z8OpxgI06|imPVurI6NC{eW3UZITy@V-3ISC5+-BL;VAe}6(0=DKn>?Xla +zz|Vl}&N92InkBwjxc7#1cZ}kUX13CvBnX9hi3qur(Y(@h-Ewv;H34-r#i=OOx>)vJ +z|6lq{^%f5oiv7AGG16Fdf=_; +zLhmLr9~0XEDQZ|CW8y)goC8#+DvMS~$?i)QY|4O7l)QZaedLo+6&^sJGWUG|QgtZN +zGco5YU_4`7e$L#Qpyt*r{->Zp1T1&u;0X*!DdV%6mPN^ +z-Z;f+%Ee-Ote}cd9%7YLtx$i1x%IZ^SQQ6LjrfIX@v&-zdo`zT)ltS9QUkY-x>h0S +z116GI)d<^~Q!KU4^)-BZ;P3)y0HZmMkFWNo;jK{yzEQ-8hw;#F)?O{+E` +z>w#8%2b47!TR(IbVKq=+ib9yNG_*<8-xs+`^Js8#1zEN>l+M*ZF1>0e-}qpq!M3#F +zscXIGjNuc*hC8LktY3^dZ}C0RVj45njl6FVETuG2Qi<{9+aMg>oYWDSL4oPe# +zm+Gcl>1VOD#SNEd|R8+T3cFM+h$sy@3m4z +z+Bz?^by>Cb__oo~+WK4D24~ua_u589fR(29w^r@rzU`A~?bEI8vor1Q_uA(iMV^1W +z@cgsY^RK?o7t@|Ew?1E;dH#Lx`MSu9pBG;IwtBJY`(iuo#cu11{h1ek_FjNQsbF;~ +z)SAlVM}?z#6>P9sGVBg6SQ?Z5^Vs9Y^;&wK)c=W}{QdTp +z0BJMSFi3*__4W>AzQPChVZdzMv|)MwV95vJksKipYM%_=;kWG +zZ_IPWB5zgP_kL11n9A>3(H6#&GaN4`vne_~iZ#h|%uKTGu3O?edy#Gbf{QI@ZNW7& +z8;0|VB|&FU75KsaM4@8J%@Nd*y_m8a>32rbyO%v~zAe7l91T38U8k!&>MO_>&U~M4 +zH~PSBbJZPZ--|rX8OL=a=)ph&@#@PRL8Wj3C8T3pq=mLH@0kl%?&NEFXun3>N1DGi +zJs0jQCzp_XdP)0RV2tGTaevicXWz;{4E(b|mRQvs5c@IQ_{i +z9=e8j#%Ytl#aCIvVw8OB)|!T`s;7&C&qk3s)}7-nHUYIUh5jI`U-mW&7F<5x;CMVHLFp{TM@OsU!4*-Q7%eyLyt!Du`y=|Y +z7~Y|QvxEZKm82C@B-B2XGlda*C2{O}g_%deBCFUKi7-k&x&%hJL_%aQ;UG*OmvYo} +zI%J~kK021vs;PCzH9(#lob(e$TE!AMZo9hHu?ZK;@ar^Ph|LoNDcwx?xUw1a{U;DeNO8=SXxN?xp2{urjeS4BdVY1Qq2uz=CJ6RGCAL&pSY6=VLQ|XC1-`s +zpeypF^Fg`_r4>FtEa#_5j_*GNj#l|MgkW23Haas}{mYk^rY7rW?z*e63{l|1DCj`oWoB8i 
+zCD=(o&@t%(b5Ny(wu?lgahEc3y!!IOue(1qo3u9fPhXPBpMUGtq}j>#G+4&UPDCUg +z<}uf=>2%dz>JxwDmFEkFC-Tf8eUrNMdTIiAagO`TXs7qkBP$!fkMLLac%;GU8&SOX +zS*p#o1s_!#VyTS!tY35aE1o15k@6vFx@smL*DfA2wAs&&yQ0+dBnjp*vk7;sqYL1!u2MF>m+WMuhXZB&Svfv}^Jb6`D$Qy9{9qeiud*i1ohqyF!FN}AfaxN4{K>1S +z>V34U$EUt|Bd^7s-CB}U@!?F>KWhPAo20y6`jLDFxSfqbw7fTrZZCUCT$g>Ia1eRA +zuuby)NtWi<4|EuWJwn;=*!Et`6vV&$^q%WQ=aUj +zzoGch#!m~$TdF;bwW}kVS(@K?xA_Vi`u2W{n5@!uA46=wxzt;X3b_u%!5i;*js$SR +z1$1o1o8_KFqdjx)Xt)^mkz>L|q+K}e3AggU_xqX)p18*0Iz*>aqe9lxKOMB`U+0aF +zi9CbqLa9V_xg}o@+9)$~dx)ID-sUNoh@5e$*PjUf{3Fvlz>F5Y^CE?c=oz+uEz5CGE88#SU#QGT?g$hz?R{PNVIyv&IrCq>AtNy5va^aENJdh2Ky9?8a +zWx1t~s#cHNrf!rq7vCBQy5?FQ?5@Gjw2>S6PM@N!biJD4$dgath_|$#O$ABsmQTc` +zf1(RL3!63X=?}dcYjTcObN;tWQc!aJPgW_eF}X5AAd8*<_cL^%$S>78KQPC>FsgQY +z^kgU(TYpiRX)@BStKC|kMcV6oQ8xK0C%jMh*nfcJ;hQ5YZx5_Fv4!$JuajT;*}l{O<*k21 +z8oXvY@~h1EmQ^{+!AqAZFeCp_pkOBb6Vo>g^_BlS+z7L6>&Nm}>V*yEwBW +zqjB~});W8XPKjN2p-2j)#W*O-T=&*F4oaV^n*G8W)I5kbdd9ast4c>UgP)%K9X?sO +zpd52;)b&kj9--(n&;j-)>$@l_`wA!kq(c)5jwnI>L2?hDE_uZ>aAXE-7-e!um-F|uO!CH_7qZt#!wh8Mc$@{9hpG-a`HN(ClBDV%U +zo%+C8I0xt`g0E2&P+U%(l=!LYJ{{uI_>95lGL?>~ReL4hoHD5`!Fm4`8#+(+Z00ti +z+JD9Wa>6uAmSoeZ0A0#@j7y@F7)X2v%KX;_LJAZGs$`h?_otx$c7eb<8O0pAaQmd? +z>1}bN-smUeWn5>T4y7a@`54WGP+XD*r|#Y8)OrRSLI3*&!lS6c!gtcx!bd9pP0>uQ +zPB~*?$=LC0n)ioKUl#~TZskh?&B57OeyJI>xfF@Z-Sm6*OAr1CCP%nPbd$ +zTQ@V1$AK~*%F3n@eR1XfrWipgnIpa#G9F+%aA9`hwRhjF_|m9I|6q-)>O +zawF83WIubI+0~nfx^lcY-(6i;RUqC_M^@SqsV1Aku6nijOY&1jP8^&3a0oQzesc3| +zP&C&0E7>iRP)L5*BkTC=aj5D-2FB~QW2WDxeu{+nw(MdyqU9b#4)ORh54Wgd&fx_D +zj~9v7aak&Wo%TT%rav`sF3NTwg`m883Y;kvO{C5V<@@|2Ma4aIxw(>ZyHGsTi&nA{ +zSm$sYC(`^~Zq=uK8nIGE^D>mZL1ND3TjQWVC-dc* +z*JIyw<_cG4V7{>f-=r5y0a39* +z0MSsSi4;Y_4$5B;5m6}#B#=fDdg!4^KqNFJ2tsHYP-!Yf5dxxug=#@X>2gP}Yv#;( +za6dh3&a8P0Sy?M9`}h6q{i`=N3G!`rOUNnVuPwukU;{}nwbY=)cOJ|e8SglG%zdK$ +z;r!|69goyyUUtYTou?;tXk@P)d91T`{Z)@~wn^s`lNb8egQ{OXRfSX?2>RY@alGab +z&~)!F@AFB97Z!YD*F&h!sXr&`UgYsn_XeYGiB3L&KdOG(Uc!8LL;65H9t!>XKv`aA +zF5arwJ$A3P^Hqq0&0|^5u0vvKIJ25>Lj=o>-7=U(JJgcZDCykN#O}?>aV{CVdzsG0 +zh7>tx2Gd&3DUz^YZAOgvvOL9MY+IQw$qrtbDdXN*nXPL0vNFf(+_w6uZa8@LGk@yN +zf4o3Ix3A4VQo6GCt;?YLKVKlWuP+RGU0MGr^siq3&liZ_OCPeY{Qm#d3q*~Jq~m#2 +zT}q;9{WmR#t~k+SguR)Lt-*CalHDhqgZX4la&X4`{ew7&l?^S$a#(01tYruPi^s*kByu)c +z%MizaRdtGcwtV`Yb*Ju@u|FJAz41xAiw#MtsiFPtMf$t#<( +zQJ7B&?ZJxw81uJVnd!TT^p%qmsWkj}uK5J{8y^TvJA)$1MYMvBWYUz5wCX^Vs|_$= +z3GKML+(*h9IHf5Av8W@(VioriA1jw6A}Kg(by?S^{aUliUr +zhd`QFc>HPDxs;Uu#)zrw=o|&=O(+~pf*ljlb_1kn(J2O$5cCYjV16pt5*@w7c93{B +zepTE6;ECp3sJZaxokLc!;(Vi*o7R46pVuJCe=s_Vw6lgYK9`>)2h#YaC6y@Tqy0Kh +zkS#W0)efJx_*<%ZW2SX04zJH11y>4d&zRo!@Tv3$M +z^Fi+0j)3Q6tbNRy`!lzlyZD4MujAD)_Bq&xm~aO3K}q}kp08J}O~jTh9%-dNGdlG) +z0r}Ko@qzdb5ayOZes0vPw)u4Rk+#**JIP)Tv5GvRgyaMz@m{0kJ|;C2aZc>jraJpD +z8}|7oBqhTZh7AHrWe3-=v#i&sj02IhVSy%zh9^n-6gw)J+la|B|Zq8dK8$| +z?ZMuiPmYY=d(xtVOtR0swKuR(b)wefSZ%@A{G*2+V&KOv*W^Fznbu1Gb-Oe<^hCp} +zcU8wu2;UWik*)*{g1j1ADVt!qH($-{$R4@hh7K(S9y;l*Yx#^Fe9tH6^U;55vpTuq +zRn?4xZv!^K&!U3EjvZgWY*%@rQ-@xY^*DE&qa%1Ny=jjRIO?-EBWfJ0VccMrc;>j* +z@d>MA)qLpuOlW8Q=oR9(Ge3bKgX-L|EB3f!N>V4ZBAVW2ZiwN|?@73NSSw09;5U!2 +zz3uCfezp6W_Z#>>h_i;%r#@JRZBMi|`H;1RKc4l4Uw`0^=gC5g4?n^x&VAlOvQKyg +zYojms?2=sY8&jX^zO%FMrt56Af!^d~_^j|=w5R`^5VoOdvB}%`N&ss8mGtnAXN5Py +zIfqYuzyeKC8aQ|U%Bb?VmGrxQW +zr9UDnq#?5Fz~!N-%eaV;l58p}A_@_3RWbnS9PXkz3SM`ZK(+sSCf~ZDAElb#@9DlCk +zOz%HuA1G6hUC0xt_$$maoiM*y2K@kx>cFHaqvP7<_fV$fMs`xs +zaQpx)oP#^MI8>OTL#mQjg1#Xj2_38+ +z_YcN2a%)_awF<))o~U#@o$gQbb&Qr*VeD9rEU{-en@5S(yFbKb-^{bpP)J=8)cNI^ +z`#8cOJkd5gA&{RShGQz2C!07@t+GhL64(8c1MKUBox>y@R7n|O2F=^{aqDw~_v70usL^Hv~=3w$e4X)j|7|mhi 
+zCmDM8`Nxz;#M*YUVuT^ZBiXk^7k0bGp|Cc%OMPO7DFsrV^MW0%>3uB^nuuA>91SSY +zn>k{@F2D)N8JqKe!9kBkaw?{C&v~)TXQWzlkhLf&lcAJD%OVRGE^R3-rxWDToxItJ +z)?iRXHBIr(p)4vDD!_|~Geytsi_Ucxj)=i_VGr%K4$yE8)*8uBV+Wr!BqRh}l;?*Y +zgX6u+6U|3Sbssa&<{aO*l7%*k(p15BBh#Qez=tcuGz9W;o57+2+}_7X$&YT^u^yN` +z9!9LgYY;a;*bz*uyX9tCrOmFUo81vtZ+)t}b*krZf(bk%7wMJj7TSc=4l9*C>OLLn +z4u4!aKIkTs$%SciHB0~Tm=EQ~MP}C?h@yx^D>o2vV592H@>1!uc9gtP-@}ADXF-f;dE!VvJzQYt15dh +zvQnbRB_N~7kA+J*U^sPH!z_U*+1z);UJ_IeoBj +zNKtScAH9>faOchJo$nUa-ZB+<325BR>{ +zhXPj^v<2O`u;cClow#|QnN25QMRZjcp)W&GOGJy6j}t_d@C+mcj2^WExMHF~8wis5 +zq(Wpsal&CNu*CUz8y%c!tX(CY%WMZigB>}aHp6$nI=J1_y~bgy&+Z*hjIBktPxWKs +z9%3>%h*U7w#bkhP*`oQ+;P#JL`Qs&*2Afz|KzFBB0&1t6o-VMh+hDlzyM=Mz4hOhN +z-;lJP>nozmrn0ajJ5)7Hd!7X^8_v4n*|Pu=`e|W?k&7R}zSM<4=Xjrgy|H_^VI?6My5ZTk +zsW^jU_Lgb8vlF~$OrGziFwb#<$9YLR!DS%8-uG4>?y=IEq87-ca+Sc`Z8xOGZo2r# +zX^NCG^ZJS_bb%K+N9gl_h)?(4NZ)IX5mx960;%OY2IziZ +z^TYjT!xofzw(EyfJbmM#4+CVv!KFR`!J=vA5nEtJ-q|p4Cd5~IbY*nuK()u;2D*#YfR8`r08^g;w6{iULVFd$uR +zoW-;O=c=)JTQ=XN$-Y`@lP^-KU%V{(OM97qb4{_SHJ&((6(ep|0r+w7Ck3EW-~5+2%TqPSuEZHmj}rNw2w`8Z=^=n%f~N;LdKUGQ`M(AkR_eX3Ti%J +z2R?t)v;Mi^u8+1>!O$%KJ?{mUU$o^srys*zStPbIsWg}W$k@f;W$GXm +zq_5lw{7?h9F8XXghB#nEbO#my7B3P&wCMsS0R2va;cct_ZXQ?}X;i6Y;(ONN;OO(* +zUv*`NZU=fj869f+b&r>HJIMdwtI>_oU-kTFw}T^|yn3_pt3e=jCxmfu?C&`4(4A}9 +zPsS!K{xy!{9(+Cf|Gi|pUi#loBq>Q|O`^Yxh#9=^!1UH6M-bktt}b=%o2+5spOSO| +z%U!!ozq7Iv-|OfuckjG#H>2o}KPji@X#HJQLE=Z|dLQMR}Hh#yuS?-e!KZu}sKqfYrMl0;25s8zs)Ps{zxp&u&!?g$&pm4QCTx{^hL +z_~E@P&nfpos`}4nEDPHJ0>SRmYCxiEL{ne!{e{BML4i)&EncSFr<9)j9C+~I=zy29 +z*zL`K%-YLCIorZ+IQh~|mWmNO=B>qrOA6PKC~*xQO<|tIfP1fw3VXxrJ$Nv&E-Y|d +zx`E%R5vW1P5^wd*sH>Um{jFc4xA=(e810+BC0$YA +zjEk-Pif9pGhTW3+T>G$(_I&e)mo>yqF0@)-i1=j^%)T9 +zp<%M?&kFfMt@75@{{~3f#0N`>Q5F+rvV|9>fg?L!K7X|_1r)t*~wlaTjJm2HKe)CDb +z!X6Y>F7@AgKuqi6&uzcUviE-{YqfmmevSHC`x+)xB92AG#6ZI*l!$n2^zY7aRri>! +zSd!SZ424BZf`U9ka6cjt#vH;502Br#2S*^fIic^SB7^-QFZIQ&NOq1_K~?~$t_h$% +zg0Nr^?F67`!-%U6#NQD_1tE@N0>Hp<&@uvCNcf#Csmg-BTO>)~{>0w~{K?9g7&U&3 +z))GxmnW)di=?V~5As~*I!cl;Pt_gLwkYtLXT~UhNFVb;$(T_qSt}w$j*s)gp*q-6o +z-R!Uy~m^>;@Wa~aCr1cA! 
+z6;t#9C5BZQnaT`zMbS=p#}HtI;}P*Er^1dpQn^0it6t1o-n8Zhrdt_31(Q(zx0a^n +zqwsB*TA)na%97+TLn-{2OT&p?{_z70#*Zmx6gtU^OKmnI3ixqcB&kPHM7tF!^fGrtnr3`aXCZ6?5KH?v~(x1c^Qsc@9Ekdf0A7xgSQY=g=VUw>i +zV#y@h4=$rw)Tf)LMe}1wLaMF+zK3(zZh)#ZlC&2S(+Ev)Zy>&&A~4a>aPMSEeli(D +z?(a%o;*#I_CmC%VXVnhJ=XE938YH*{K)g|sqV3;-9Wmf>i2icuo3fPnhK$n=$+ys{ +zwb{u(l+v2BQ}r$;yk?5+j>By!XLdNSenwE;*cnGKX+|o^pud@O2Rxx4g>#>N}Ylq3LQyIX|xYiWb|Q**ulGO@(l6 +zO6s&bFh4HF0~71fA0t_w?c$ItfaYWx_VPF?b6H!r4KTE0n0%o;Bzng?=rIO9-0MMgP^4I_DS +z7*60~(YSJPb7%gL7uDSiEM=h}rkotolYMEVP`|0*tOdIje{+8v;G{*2|AsRzaP;P( +z=~A1~%=hJK2aH&INApiX^V?@Qrq0}n8M32lYLzh0@C1HKBzJ31B&p)&J1CpzT{fyz +ziZd@&^ez<)gTO|BI(XXihOnH*oEy0~PEX{KTpGn0cbJ>P9mSQONUuCW?sTr$>{!<9 +zTq!=A`rXfvbtUMILOU>`pmUO5QSx@06yk +zRsl1)du1x`+)P>uJUxB6c69iLm3MWLKi53(?)8Sd1%|N!Q&Na6@b74a{RS~Fgk^-` +zN-smvlrB?bp7*dQ;74+OEF-cV%2F#*0_IXO133yO@K;_XTs&A4Tv6u+gM2G+a%nok1xG+GCkrXK0B{gc_rN{ +zs=nY!=n4R~<5Hac1jXLu`XsxG5K%0q%n4bkCaT`XF0nj&VoNJBDh(S(BMZPMbDLGe +z4CdnfUg3m2#as+H7Y{mVsqlF*`WsZL?o~uvURItZ1F)gosBtoPC6f1ME~CXVA_CVU +zXlM;+sDkD-zl~x7=kC4{G;QHWJ7`Dl@d4-xBz`oObR2KEZV~IA5cB0pm}g#Y`=!E# +zrb?(}7Q7`)!X;nI`0lhNlVk#}WJRweDC`xr%{XF(Cg5Fw{8XnxrM(5-SV6>W-Rzc% +z*5ggJUTr?d3-@=|W#iM|SC%{H)^(UaV0c%b9+%R0xpTveesChF{}(iR{_ZaKt8eMS +zh7G}J4nlqS+W3XwU*gw~CSKkia`i;ZV!i-~Z`&4_CcMw33Us +z&{HiL@RZfll<$8#=m}T-Nt?@)N0LFsgr~TUC#`Ew8uvbV=n}wp2WpUldOCs**PQcn +zm$Jl)9n-&X#{Pn3$3O2*o&QjEeroT9nf&u}>VBWc{J!oz@4Ttc%Cv9Sg}%kaK9381 +zYwG8JR-O0cfnRbi1ZAE3uj4qR#(hWA=K26it5&pt-*7TPS}8v +zOrJxDSeFx|TXLdQlj1#$oE_5YgO7uHhdmEyDbbI$skP5}NCnTi&wYKM!z9t}4V#j- +zR$a#)?vgRZdX($4?}{Q_b97DzFOrZ7yl;IO>k5=w+w#3j_vC6|mF8~jCGEVi6C`jh +zKNY(@3f8v6{SM}wZI|BSr`iV~rIua`aivyQVC~qNymay!psml{;m~)BQoZ6jH{kA6 +zklf*j*FP;RgoHhAR~JBWuru8C9~Qx7J51e}0*vL<(W!_tFwg!Y)9*VcS3!iK`__e&sAloeG@Bnj&7wiO%Km~xhSSn9s~V}fnI +z7@Zq5NMML)vk6H4a=)KlNBtm(8X_AQJLaa{O-!V81}X+>zx6t_7&5 +zmWz?07?60do|9SNJV51e4X_UEt7!|0KR*93#VMZ(#tD+G$xaw2_SrplS^AV#8i?;3&lzIis3E8?4UBbz@b?WIWA0s9l!bKkF2Lk_qSE +zRAal_saLVUnvf##TEIhDSL9(2<2Hp~e6qyhc~6-!lx)R|%7e4kKzs(6N4W=*A4JjY +zp0>+sV{9C?H-WAOY%)+_LoH21WYjkxsHzev(#iRMO}y9Bcbl63WhU}>d9V6v +z>0j_=Oml?)XE)s(Rxbkvr+98Gub%zFAk-*UP8psPq;UFC;9x!f%pk!Cg!K5U7~ +z={_ZWJ@tXhg&mBXr)Nd4;sZe+jVp6DdV=nM&zn&9NB^72eEGMg+R+Yb^cTtWGhOS7 +zm6E1+0elr%$s#k6)y$Tv_WneXyt}0j+^uGQQ +zy{IPk0@bef#hVqnmgSzIvgF3#Q-4}QdWi5 +zqJUWm@iT65#gGSxXndj}k}(Nh|45E#ZV-HB_EFWv-Ej4n0N +z>qxrE^4s&Tzq=xTkIkJ~d;7`?(W7KJe(=iL?LyGc0j1U1$}2>v58A7~*?Tt@?*-$n +zX=j#D0?<_FSD9w&zExC+4J!95;S*KsbKrIh!$2&6qWT!JsUIvF8ofaQR#4DxL3Ae< +zxB`dz1_z64sU=Xn>pfC +zVbLXCvB#7t0}WIaG_)HUZKzBR?~Isd0PH!jcr)sr=_ox|xHODR`*+qp%1)FmGa*T# +z0WMw9TohCvO%L?O8BbGk7yxB9`3Q;05>?q@j`R>%^feeWID+!jfZ7j@A$u_|L8Z2i +zGvgK+DqW=F>4-Eg{l6B=nw7#Ur{XdVn5ce6Pd25#D=cf6;?Ji0Ia0@!B!3vhNiUIF +zrxU>Q;p5qHBpf17nfRh1ai}x4ohwt)MIlFTNBO>t% +zJ28_@)^tijjo|Eg@q4h=Sdo(29q81Rs<)Kh<4L^rB<>XNSZQf2u4@DxFQV>P`Z~GZF$KZAIxa62nScre+Ba0>qN0WhSA~Vk${c +z727|<}B40Zyn2msXs@=CMA1&&GH*B+=c}z;KrKNMfO3xCb)x +z2Rdian~lgxOn~S3;3(%EDfZ)e17=yWOlXHY@Xp`HeF_J%8;`K(6}XKQKrN^IX*l!EBJByjNTTP)SU~z?T&9ZSjUN9StmWh;Z!&r0MkAY;0cVz(!TBUf +z>;#Nz0^A*6-0pSjK*i1S2AY*cbk_`N5zd4M-jp<=UQi**&fb)UfgVM0t}JJGoG4K< +zB4`BO@`b`8`XN}d%%aE+Bepj1Al +zoHf{0m^nhPFV6v)fVWu`B=twW9pS1oOUBAd+B-{nr%LK8;!KT7$M}`Z;mVQaGPjG8 +zd84?CjLOIHH(5@lqJ{3wo}~S%;i7dfAU@TqpURryOkB(x%8eA}7U1Kfqg2!E1t94q +zt_tiyodAL!e~T;9tKiy}Yf4bdF@7&-~A2BSBo*N$e~Qw`rGQmYAE_meY{eC}d| +z%ynn_HCAGkN?L(x?cRepd%!(^mFg`CwcF=NVrprysFL4^`cKfBUQD>?kY$rkwpHq( +z!{Zg0={v>#`R{wu+H&vFVWoQU^!gE|+lkUM5;;FA@^Oe-iJm(nad(Szs{-=K)8z?& +zu#kJkBo2~f0t`KgAbQW;r%sgUp_ynmGg +z3aMcoZ@sY5dGoMklcIMMafCvZfH}^czU+I(}jGqjba;h|)ZPDVl=3`o4`a`-c 
+z>$199nsZtO4k@dZl|7dCWt{|>{(|#d8B$0i&{^RCyV~_&v~eCDxJtH2D3bh?V83$1 +zNFql?60ep>)u`fX1rfE^GUg5I78_dF{Pqt9NkvBCy8w3$P%XAVk`+oUU+qDsS35JJ +zecFQO?))&)tKKlG{wiBCZm!gsK-gME=y1&H#y*Okx_i;GxH!8=93L$zU&^Y}Y#kNq +zMM*RF&@+oEc`7Br7B$)`++(Nsz1+vwrqT)wOQsEK5A@v2zSuc3)3H^hj@aGVzF4Za +z;n*oU+u6lKtVMJj_!cT^n{9qo@s*c+MKX)&QXIcGrC~0DjbI*;DDfF-o*U&98H;|s +zlO7;S^evhdy_-uA_e7v*7wgFhR8&Zw!rM*3SpW$R-2nhJgH^E@g%E +zv|N{;2c-i-twsK7&h_Ul*Ah6Ot^QBH)2|Key$toQM+&VM%*T_>=Sl6iE`##V_77g*_|1 +zh(%tO)woy^BChx>pp8DDVucV5$jyA-nLQfF#Ml)Bgml4_$f&xV&{ZxZdwCF7hXI +zod0iEI8*&E^6E+Ao9Ti#v#oDFy?*n*BJ2NAqNl&Oi12HMdqjnKJ{ca>$fXDHZ2=(o|5~In3~R@2W8KBky>f9CynJOtwn3^1r|ha901Cw +z<4_bBayQQvEQRx>f`ju-B*agOcFdR`k1CFuAN4#8((AXEfXj*c^9&_3Y>^Yl=jmNB +z`N*+dWp_Qrc%4#6$I5Mb{9X?bL`UH;O*)&mK$&kO2?m4*=TK4@y|WHL>H5Wk&Up-NtccR~;drkbyPsDiGhE +z4vk^S+igvzsIPLtJW;tV@R4;KX4`|O^5{Kd*xF|<4y#wYV#j}u;kT(aNw|Irv(z#<_63?B?qsOW%Y+0Qur`#Kk7GiJGfbM_l5~wDjNcPIJP_I +zyyIrg>Dx<9gG1j)x?0G6N6@u<=Rdt;J2XoxeDKq5bkSOn3!f~B;y5X8AxO)^%^gF` +zXA*Q}`!>6HRz)5J=-quZ$D3N?l?)F#P0p;aT9<4w9bb;NQ!zJIgI?q5Ru-`xIlCswJLEruD{MyF{{Po)_NwU-) +zreNJOE!L3_h+dx)T4V{Rp^_$>17U^Ba$R*{{a?uF9H@8}3xinMF6Ph=7I!O7QdJ81k+K-XI0vHEkVV7XM_fp8kbk&V(j +zh$5?ZJxfz9vR2$}-DMMBrIZT3WiRlcg3z*QEWXY@x$>Sw^f|Rzwy(C855g9}$x^9l +zhll2LqOM_KS0SYbIV1Fp=|DfbUT9x@A|7y;Ff +z5~`zz!J6IoVWg$tjF)`lx)#adIFDP-hKO4tWH} +zj04ngzFsY}Mr|Rtz?%KoYCUtTxXm}9wx>44lmAFAh!+=B^$GN7`3?!tROd#^ydB} +zh>E~YZBRf(+P$kV#R=fFdGD#7N9r7RnqmbB$RrLn@6jgjeFT(#RkYYTpHxklI{0>F +zbn*Vuv+C%@CvShREIt58^GHyaagf@Q0Orc0C_Nn)w_IvNCi7?pE)$58OYO?NJo+E8 +zdeqWGlynW#;g86A=@I(>j*FhY*A(6AV5RS}7%u<9MXq<#vj2sP+>`&b&OREQT<-Di +z{a5QOYWazebS=BnWpd|9hWIBGSR))O=Eu+30EQ&j=1;l!TQo2Kz18_Dy8IawO=o`n +zxB+dQ+6P|g57VzJ{+2jx{{%n463M`|TLYZja-UO8>u$*;U9`EPI#6(bmtr^;x|4Ua +z`G~f)H~~pD3gv+K{D($&0Fq$g$`HTz-ks}2x<}Q!VS#i#FP11$exDoea;*pU&L{d> +zu8xR+(_L=T+%9F3N?)%juNnXejEWk;_Se@}3lgs!Srr}AN7N^*OJ3iSH!^!>5j|G$ +z$y$=4@;KS7=F*`neuY5T^3bA2hzD8PQ(Jsrm%yO#oHXbW2OKj`1?&UD6kTk@E@Imt +zroG>iqt@Q5J6MZ+_h2#h+6VN8+j94x%bx)C52CN)xQJ9|GtVw2W#Z-k~QI0OXS>9*1%GE$4o(o+fVytM5)~LcFoj->>(9ru_FQ +zllkilS}_QWx7LrEaj^|ek!N<>2P?qr!9RX*>QQ&~AEL6+?)ta*iI|P=DB<39-I#cw +z$;NL6Bl3AeOtR=2r=Rc^3Uc*{f^-p^p`=VGbku>=?m$4oK%Hjfg$4pFfQTXiz(P{B +znV3cZu?+=P<&)n)$&74Br9*UyI~am>JKziYH-F;Du(`nQeo}I_m_!Bl#PDoemq9}{HYMfYl8EvtXc7#pfZkUL9bkUgsu}EbGQyEs= +zNljRaO;RTOMvBqAqY9J>7aeI27))Cjxrq~#`Y#N{inbMDC>AvrhbQCWT(Qi2CXkvq2ejt@kr&E<2d@2E=rzRe2*D`f=p~NAoa4N`T_CAe9}vHluQ?;5l1hdj`f+w +z{Z|S_!bWc6C#^B!S}@6K(=s&?WJ*8naT%QpOU7eqRXDn+YhBBTte8r8nVmFZz|4S= +z-oj{HREkmnt?Dm)no|5Xe4;b(SvXohDGuTk$yE|t;1lVvL}SN9rZ5R9OhKw718@Wi +zn}*!LMZ!2KOjw*TI#$w&212GGRU$V#MWXr>_M%ET)(k>a?F5(bem6Dfs{u`|yKYor;6u>6-2;&brZ@bnJl^rRX329A8) +zkP^^Hqni`)BL0L**sCow2V-aXr!(2KoVXY+K1T$!(s1;YWn!um*&d(pHX=KFB=cZd +z5(Snny_`eEq#7>MxB^1^jOd$^{=zHzU*2#cQ@YQ9trGx=-(ZshXA<_o$yoC^o8`P! 
+zvou;G?GZdR4o>1uK_uW=cIYexG&CJfI*Vgw$BEIHfD3#g4j^R}@|QuO0B_v!PRsep +zOL6y>33>3;0({K3&io9oqAasqg=v|ua8d)DR*{`IjW29zyz$g9HN&t-uox-86!bFk +zSO135rvFl?LLa=dVc{a5w%nDk5*f48ka7)~dkucG&5X6MOwPj7(Fl?-I}tgOH?)|1 +z0+Y6$L;7Y&VXNHwbusHY26UST##&48`;$S)q9ntcDD2JlfSdkh`HS4bRRhA=S@O6F +z{rwF2V_e=^SN=6r#@6{ve{`|t#tHmpL}~||^iL%1--$FHVrNL2>uBVuk+PrV#m7~% +z3Ueg4HN`zOFGb>td~qdjvrAS-VgcTn394m-aeyO;)T5CxGjLkw-&ERV=OmaDExqDK +zh_IOGTwtwwLx{^4L!{zXh>=a?-8rO~Chjj`*_9I|_opj{pp~l(P!>YL%1Dy0%1!CI +zjq0zwnOj8($Os=P?=dUvG_3H&+#d3}ZRC_?EUeP*NwZVEt-zLCnI*hyB3F}6g(t16m;Rc%2qyp3``A$U<06$t?%#Tc)|i1~G1N|ASS +zUt2*H3A(z`Md=dK`#He7kuj(y?(fb!-aZ9VxZ<&RTr)gXX|DDu{MNivb)!Ft2D|%p +zHmT4&?KDRQj!?K0htMT4$K!eGMj0bbMiEJ^^FvV6k~cxBo_XBtZ) +z^;RP@qp~uPUAHNr%>86c;>sU^epR>pVLdU^gJkGQ!rBUrn@iGfCZA)bKtv|$EzR^< +z{^0R?BYb`6XjI{=inZzbRRfyKiS(yVdDHWdsPa^Yewh4xlaWeuYj(}LQKS;C`QgSE +zURqOgR7GoxS#!64h2S84V5!K +zqIlpR5C!>Ej%;aNXeNG%%hYbk>>sbuoEAi_w6}D&x+zB@PCXp+uTG1vmLUjEJEy(f +z@+b&T5}hbL-%@K^!?0py?-(t5%9gy^Q)bMrpTyTG9wc9FzIEtQW$$7cB%p5p#YdKj +zk3cyBV`VYBypF?L0HP5UZ~B3aFIB{%hF`Uhae>dC)SV3KXyK+`cOhTecv4xsrGskR +z;9`ugJI+ou#wWPc1htUQe9+3@Qgrk5i +z(U{rD-%Ye7HTwNe|XhmXM-cX +zFBni~ABjTIEo4Y^WrBj#Vo26_Qz-h{CPp~g>)N)4@SujPFR&2X&i*aj>!Q0I5t`bG +zjPNl42WAiWar+f74@gUes(!w@%j>y*(tzRZ=jxvayrIukHWr^>HoIp0tbgZxzv=pQ +zdF-ECpYy{nK4e3pL&We70BML~MMvP#aS2}^adChokbC8U{iXG+pquV5kF8(o^SOMy +z;L@{>m&c?aqN5$Y{mauDL2L9OK3&}J^2@WIhZ^h=we>>+Q-uGa;lQNfO7}}qNkbu@ +zhtFsT*X~X0Q*@$-m{T{^$Ueo{@EI$ +zi^5S!)6xRW4-HR{Ti>ITfH7U{d$P_T3GOUwDtAa>(_5W|1Y@c%leynjkn*e-Y%Se +z`-}K?so?EO>)W;0Z-1}91!#_g_Kic%jEhH)!)}Zt?vG2p8JGS&j?|ox+c%+bW@1zH +zgz}9E)%z3bZzeQFj%_cjldb^gHy8cmE5b{zrnSzsB(ZXMKnOA`kTQY3E!_ +zdicm#&JobDH4yz=N-Qz!7F5|i1JEWT#;ZjDHcZB*9MLJHLRj +zYMz6<18SpBUsN9!mB(Cp5P!)z3lRWILD!29i0k*ex_DZhbmS$&R2udg%fbB9gFwli +z6G7ta+_iL!wdY|<+cs#yW%!)WkB1HSd{8ui7Z*3S8b6(@Nf^5uJW +zy)TXfXb=iE9G{zq%OWrT!ALY-i&%6?bmqJOJRnZCAx%hZQs`C^%!4mob`NNgu))~C +znFSS5k}_a`?`Wre +zrW|A{(^h}$PTKJ`PWrV4fodkECk|7?q|;QOI8yg93z`PK|GggLXSdZ-UzTinC+eB9 +zm79$(L6FEa?;j@p{Cq9m#{JYzwZq9<`9;#rYbO!OsGf`aJ^-{(z8b0kpnv#P_@4NiMqf0l--zAt^kl +zw1SUhP5k-MNdbAQV6Z|(#Q~$B4UrstCnvo`hP9vAqc>lqbPRjR5zQhSn-n3EO=iGv +zcw4FGBna+~5XlASG)bRgDP>qY+|F0#(r(iEeTrEyXH+~YKeGmv$}p|si8+Ywu|Nmd +z(rPS_DO#ZpU1WXi<@hnP4K_vVyN2{94;gcA7H+l!Z9WwB7; +zc%H4I4OYw{$eR2t7H%2Bfw^?)HjhhH<2nz%`JTJjGJU8za`?%cUrqmAK-71eivsDL +z{k~97EIq5C>K}33zd+REryqCbEkEt@5y*BtoivS7*?1PRtv2_y%alpya_{LowKr5q +zQu;0YgHE6IRV;C@uRi+v)2W9ihMt%Hyj^+uvxi^v5Eal-+bA`AKIY^rta8JhucQP- +zDCh6i%n_;hcWbsp)q>2!v1M2w0J@~7z=J8cjE}?I`0Yx`AqpiEZ-FWNM+V!dPAxtl +zaesb?N^poym(M#?;nE-fP2!cc56}$Y3ctUF{n?t4agR^#`^v?LNuc_c8!fewnWC+q8<5c(@%u}$(z+pN0`kK4hFFf< +z`t&+ktg|_K9)*N_{Ad4);n@9oJj-?(-?w-qV0;lYO)(q$4d2SP0vvPs7iL;`wNPeo +z|1EGk?!yZW2xIv7#xFOr*w+{>V6#%pH{z9OAPJ@1?iKUx;M?Dg2EI$PXNhyKWI)3A +zXIFES#OkMrjb?xiC~2#EjA-%zbtI6uB+xua*9Wi%3%IviLEi^xH4h<;ORea284l6lX(Gjkgl1FV-Z6LqX}cr7lS|s#Pil$yGYi|SEM|bFaD5=gJWAIT +zWZM#j%Yq=;M06Js0~0*}0T1_sH-(U~)S+DHiA` +zE|U7$jN0u^?Z}2=`1JBl>b|ZRYycxW8-JiHrVuK1t&4`3rrC&K6b{Gy7gX9n68}&o +z0okt$@3Ta+5{kAgpeP(YCLp>I8c7w>2tvj+EaNAa={zly)ESfR6(2vvKqyNVm(doy +zVzSY+bKWr*$|C+>JZU^~5t@*o1jEDPJy_8;WqA9gC^9BKwk$D03HJgQRk1|l_Tw^_ +zqSBzEf;#>PFX;|6M#v|cF%!CZ$&Wh|r-k@@Wm@E4wj}%ywsdV6Z;odCM!|jv@!mcu +z%LYk57xD9$m_y2p?J%h-FG{O&(!Ff@2k+!yVf5}Y+Gk<3`4TI}iv@+_)3~ffABN-T#oF=mzjTvIw +za5mX64L3v5pNTJQ{6Fly=U0>azpfjSKneju4^==EP|?`vXed%GSf*4pqCzMl(h?E^ +zPeLG}2kAZZDgwDkV9ZemGve08&`$UH1M9B|CIkt0^rXrof!#8tNp8X>$EsU85t~KaIinuVF^jHJj +zoTn?P$Erl0AmXFfSpEz2ywwcv{>ZnL$yLT#ZuY5PE7MB{V!;TIJth1nPTJrfJqTu+ +zK%T@+J1$J~)&-tZXc|sQ4!Rj{y*Yn_L~AOGgp6nFvG1(W^7Po6)wy(^EM7J@z@9~O 
+zO(^+Al5YPdNxZ_DaCZ4Z{)YjcrCrf$GC5!>_w7U;#EzC>Pib#UU0W$?Q{)A?vI7R< +zw{S$47LpE5MIO~a{1F}wqEpZv5029{1LA$0l7ji9ya}$VTeA8hy>=lF2Bg=A7uQS_ +z7_Aqmp&7m5oB^N6A)mC_$;i=amVRJKjAB-&p@9Z7uHg9QTn?23@b@l3h8SRN})aSFJ98vr$!X1R|$MC=(b~9P2Mx;8a)|@s7Ij +z?%4sh%1M#&5h~MJN8%%np@EL7g0zKnXNQ_7y-44|3K@suXmXA>sk*{2IzKM{?Mk_y +z54XOmBwVkczb$TjtwhjY9kNa@8cZ*yvK(`x%GYUMa;WC(r50|5@;s>pV{*41OW;dY +z=9jLl6~36SU7N^<+f`dqN?()f`cMpg4UPkeSLjAIdM|)kcVRD*%-BUzE&b9{aI7E_N +z;eqGbmlccRw_R^oXRTJ(cpEjAY`8T(5b*2U%2#q4pSCrID^vv`W*>?3NOT5=lXk-1a&orO| +zlh%M4@_9Yc3j--2F46@M-`%2M?eRh?FRg>JPNkjN&I))sf~Cbv96fSk#y +zcDz2Z#(!bgdSkA1$HFy&MIy+F?A+P#srV9=8#W2me^*sI%w!dZcSecFYr7}6&}jA= +z0!v)EeRzXqT*|tO+=1Vg9UE?xGPn89tmwao@_XIuOzfKv>)q%lH|J0VZ7c1=N;l!U +zl?Lc6`atKOD-EWkhLZK0FLW}YrgCnRu=5Lf1qF)Gcg*j`o;xs(83N2{pi046s+cJFWe;{Epit6Q!p}F)q42Z}9X16I}ALXoy`7&%m0=cj|GamSD>A6f4|Bm%~goB})R2BrE2uVaSp4u?8A +z4+$~B+HFJb|KrMPVEX7^Gjj^Q^PuE)aI+}hB^4$c&o7;KN%Vhi +zr#%DjzJpF3;$UhGm%rM1q_Vk(95jJMKRWkStk(wH>yKwm{b$c<| +zT+A2&b=D~Gj{YXQ-0gqxh)w`PXd}KuM!js7pxwW%W9g)ve<}oywcCE3zeTRZO5bAs +z{QcIW$7QUggTHMHX#P|34Rh-eBKIzIY1v>;w*~3~N5)d?oa*TNZ;j1u2(=xB$OiD@=BBq*-54@{uwP#5pz(~s)vX6y1r9$+72z2TxCLTe( +zG1IV+a3{(%4VzvDx@ES^=ImuZ8M~EWK7Yi}1g7^q3>CH-Y!tyVaMUL*(04HGuPkrQU8^+3O|w1%dL +zCC&}C`6C1SjYr?%?}0)C45wiyEhUn?$f +z_LXa%S5tVUsIGXMqb%H>OqZKb^~bUxm>|u4SSDN}@P_ps324?N6SFBCTdH^enx3PE +z93DU&-KMRIqX@AU_Pdaj5=CQ3p`P?@NlaBzhWsZ$FW^Cv>sA)jWjhPO1^N#dXo#j7 +z744FJk?fv7D`wWSg&(^cp$0!6Hgwp|h=mt7JbajV_hK>MKqS`SVz4H{$y(l0$WW=+ +znX08Lf~bK2!F#OLU8^i*_wC*gllq*L02cowZV`{MAw&rri7AQxuzzQ-1Jn#>Yq7~Q +z*@#F~anyx94l+s%mD5C8Ws|J;VA<|V-6DF~(MnI|47BqJ$V;+5)`JgX-JObMB5+9ik<9yTU6r*C{_vKJU4CC!$Khn&EI>VrXD^u1DxCylu}g)K_hCl +zXE3jsK>_l$RZ8!@ +z6vfi*hjd6Q6FtS_xI)-#eyh=mF|qwb_yu$DC*aReeflleg|P*S)Fz5=om{egEe%m0 +zX2M1ftKByfT5Z~>QP4*8TWVQq#P8mB3jSBTwak_6*FqvRi*r{+w!H0*-9}6kXzgaJ +z%ZP{>IGtB8#;JwadrI2WJ;z?0ssH~M^x5O|AEf7J`bcl@EE3cIJAD+~K1h_@o6)cY +zLLcKH&!fQ^0}-zcuDF8GC-X+$T;gk^+MN3Vy*CQRhP}-4!bc*$e995g_p*>O89p^B +zqSh}=K4KFCS6a5f)QmFFvo9oT`b%|WI!qWst3Sp((#@BXmI +zCet;ypx7#NKO#+eCU2`j+a)lx6hRv!3lE8!IQ<$w{3H7J_~Ey1DJa;{I>iGbzkYp| +z2KLsR@C>HPx1Z3b`#4@g>+p+b*c8E^m+&Yp9x#g@5!~K&6dp%YeIf50(`%c&obc!D +zOWh4;q4p`R*tL$%SzWtm^hIt^+z>%*7o~j&^=>)WvGawAPWu4tT`*TxQvxTo-m6#) +z$yI#vOlp(h;a>INNZbE`^x!T!m_wbe=noUc=9g_0;leMi9bIn&jbaXh(C6mN_je%& +z4{78tf4TLw>+KG%=~KkP)sy*9agEiE7gLf@i&d*-R?4@>63>_*^uYxIov$`Wp(S2* +ze@r;Wd~UdF?PrCJw_%Db!BZc~{*bdFTD&;95r#E756LBee{$@2`1IE^u=WZ1lVh4} +z;*MM5PmR9nG|&D6>EENbRf8aLy7qE-K +zfv4eDBHLi+zIRLjA7*v|3eD<2(pK0SUK?~Q@%fKcZ^{kZs5YI*ms#-7ePLJGQ9aHP +zkFr8*!88%q&Fi;-cgura%ErCqc$o885OH2szstRu}HBhS}Yp?>o%KKDUxsv<{t)4?tRunf)~ipaVATa?Ic +zg|2QCUBRY;y*Gj&9NHhkM{L<-uzpm%_FGVi#2Th3>1 +z4zZtp0T)TsK8DQB?T^FgvM5L@ +zc-99RMGp|--+IR%>8Gd9(=>ST1^w}F=P3Oyl*Y-(_lVfSET9h=o4uNtFc($&i}ko| +z`L`UAUtdxvGWG`~`869dJejCJo1Bn^$Wde~^H}!yBmsCowoxM{E3FJ`T{v=C7$S#{Cb0&d_=%Sk)mD- +zXzjRE7kuiy`MAAPHjw;4^??_~75&30y=MU?*J-S*XWb|z$g$eJwOf3AzyOM2=J!C +z7NlH3?z7ARxla!+Gh8>DPRe{sP9X<8Y#JK1_L~RNoCF%1`#gH~$yizB}g{#OF3EVqm`H +z|BQa_lESHW>GxLIHz)ct#H3Y+ry{ +zEv5V??&CnMDCjS(&O-Q_lonMMT;;?C$CaN%N%Z(czBJ~nDCNV59bJ% +zSA5+q#at0m4H{kbVS#sHvSxm@rk;(Ebt^s~Ez%X2{y>ob%7OOUml-!*@WqW9V-Jj) +zWb~k^A02A@rt6*v^4Sx0P+mi%-gRD9Y>(N~IN*URZORj;#3 +zE*e)obFMn#(s&155hp16&|dPauR3R-p?SXH9g3fqmA6~N?!fa5d2TTlU4CyecS2Br +zor|(YR%W+*qJ%MXRhw7qx8iPO8s4z5Yw)bO +z{br&u%NQb{wgtFc4;)IWa;*I^+x!^UU1kV1pg~7X7UblWlG76~7yv8<_Txyx1bsjAFm=(l>pVRC>?)XR(%Ot=04he~1jF +z&vthh7#Z>q_H7pOz4{AsDFZ!O1IZDcC?vFmU6oPSUS}3j;NH&&1XKG1#pI#1%E24H +zrAhwbmsD?2`uLW)dG`e9~V3%nIF8@FN5U%x3xw6VTrHfQneLdWDC1 +zv0_^;KP>OXfCeD~9=UVljPIuF4|5G3>Yo`a{({j<8mkpPG&A?bON`gz$E@^y4JF1+ 
+z3dY4|$86NbP0jtvh+^kw#ovAj_+TJ^`1M6FGUOEoXDN8ap7x>|Ji2iD(WTf&eie^A +zwm$N|d?}#olDFDK@RLhd^(V;JFNG&zXa$!-?>(aQipBSmk}EE93MS*bCKI1bCV!vg +zs!d%ppGrS7l}Vq%UL))n;y( +z&orNzX{FBy3TFNn(?@#$@6tyH{+k~CpHGi!=1UR{7Jp6zZCfZyKRtNdIDQg@NmofH +zQ_4@}U=d<8I}Nbo1uyOu96^_W^#lCsOdqKq90?VxIff>{JtcP(Pp!*4^(mhOs~`)T@L! +zynr0uKJ7D>XpLGEdvI@NtH_3vs7geg75T~0EGHW%PWSeT-OE?}ds4yJoyOW*h&|o< +z%Da9%s7*Fo{`4v|j}|ltAw7N2;M9+~`RX;5cudxvbK$1VIieG4thVXsnv{;5`PW15 +z7d76`wpX5EDh{Yy-hMCpBsc;7NakF`^rJ;AOAUdSdLY)O4cxgi)^K3>i6{ukjHeCL +z?<<^2*QBi8O?l6dpe+lJuA|ksFdRJ{vaz8)372Q_^}i{BLr&Q-65m!1u1b_j8rWGw +z4qPHI?`R@NKy;MKPj-AIRFhY=Q((>lG8d0TKUaxGga~0ytmRF!LI~n@pn=?vE0U74 +zB3mryZ^#l#Vhs)#BR2MR!*HxXQQSo$w#T^^jxP?8*Jf>Am2Tg1^!?ciAw)OR+2WA3 +zr>q3u^P59pGLhgXtcxT@ju3Yl^TcFzG+`V6P0R)lP^P9z +zEdSl;^V*UL$Mc~C$yeuO0HKLwoSpsIopQFhX-s)eDyKInPF#J(NoM?&7Bwjyu)R9EQT7HT2;Ziva|0~NE +zRoIQmdoY2QhOLoXF{uDo?S|VGI|6nz>mCaXXcKjr2mp7-*Mb0zopd*rzb(0coG0To +zcIksr)duR>yHx8Bci6+-bH0q+h1A`ry!TqyRVhoZEpe~s3ERgX +z>Vm-%ABC8F*Y_3Vv=ETg;5+#27s4W(TsG_1N~1X~AEd)hYVy +zhKH6&yC-wWb4FSjcmWIJGu4F*o2SYvg8u)#o@r0h2Y}i|UXNrIkoZe=6t)|dEChb5 +zj&>(s0K03OFZ7y#-8H|Zms)>j_gWqOn3Obpaf@kA|KXh<(=raf)cr~sJlgs(qtWc5 +z{z~?ceZcPD6|ies0ulm{)oyQLM +zH`Tn25G6KG|3%>bQ}_08y!&<#FZF-=kB%APYTnO|nNX4RZyht;Snwc=GmsTgM-GiSV(n&*p5pwd6El +z^fvb=3Co7UjU?zFE;F-BncdoF`OER89kbYTD;+P-T#YTA6@`xoT7QbDbvs}Fy`dmL +z<9B}aJ7y_{vNi5Hj!$COLa=+WLfIdq3>=ns<4CnFq%aBn$FPR%&+N}H&ve0pJxGKq +zK+m-M_r+5O??UpIKi8Ah;5&?B#v6}@)g=^&YAoNJ&e_Q!Bz7$YR#Nx4@xR;@C%~?x +zm_7#4j@jgyxBA$Qr!VV6Ey}*XKMLlA;0IT46n$UjoV$5|Fbi)pjC>!+iI{lKT^Vx! +zF2_KMy%HT2y#i9HqEX8c9l1&1ZzKe3&G^vXu>LZ97i16t +zUFqJ4kl~Dk+}GLo1j?{KiQ+|P51v~@;^?m|07z8Gfwju)+Z&&wHQDbG8*({Ck>f?X +zzFnVCo2#u75iy9BlXc-CP7u0H%SOH}HXVHPgG;|tNqM>w^^}Yz83GyJk%c;83w;r9 +zya8TcC=3^dp@e&~MW@*m4GpB|YLurfc{mI4w>R*#Qe;dAB8_Cih?LMusizR+F);r_ +zh&uEu{{tQt#fJ1BIt&pcbX`b`_oh!6idx!5ydpDjtH7N$=8_>pWtB|yX3de&$MG>? 
+z6O2u`m`V-ioh$^!n+dXuF-6M7NwByU3f2@rAOsxxRgw@!%h^RJAX&;rz>_{!v^V^m +zAU0GHb3vEkg=C!0rp4%ReCKF!8htlem6(^#oQ*6@5SIxmrF}>gD;HX_T>^~_jI{w5 +zNpZt{;dkaKL9=js-NbMt^9P^Zi9kkSVJ5O51cN65P(~p8;VK8miD{e+1tr_}T*5{n +zv3J-B$E)JLw?VdNHF#?gM76NT3VP_)Y0vKIc% +z5KyR2L2cMY9K}N@8lwBLA{LLIUv=#R2?q1*6M +zT=;QxyV!UAwC_mfZoNd3E<>M^@@kTzq?g!l1Wn?@0Rvfbq;GMXxdp# +zX;I|PDkkTVxD(#V!)47MC(QFoJ!dYPV4eqq^4j< +zE~?Wod|P-nf|F&vdhJIxGcr7KzkP(^z%}FmEjB0dxP9)^x$NA&xXQqcLJq{uKF3xN +zSG6GZ69y6f_!kV~3Q>_~s>3}=d6)VV#;aq8S8~s0r}`Lp;&RrxI +zjP^$`QIRZHTAE>Ah7o;TBjZ04Ab^*-m|Zl6NJ+{;gbajeqgXsjx@Zk2Qz`b`K;!^6 +z^Z8mrURA*ef`h0DSFz8_B1Qg3f6R%tSe-Ik9iF(B^}>ZFMUDI`yTp-QB-3BWX)D_5 +zQuRKe#+tYvk4ccc=fIo +zEN}RHJN>dEZ(y`6b){@Bt1`vDhUN{A%q`rUTdrqU)m@oz3RMx8U92))Jm4yV2MN$@ +z!R$meXQD#Fh})f=XM!%?!H<(eXOOoso|DR<{7MN@&FR4y5#NL@4h6A;432N;d!-^U +z&t^W0m?x6CRQfgL@biOAo18qja{ZWFc0V<8aGhL~8(Xq1@c}i)e2|f=98(uhsjuPi +zbMqdXgn}rj4<&cCTGY5L>;pRTQ#|E@Ps6j^2BYfhNAaZ}YjQzqci%1b#v-M+zrnXA +zSHG4vAd-F?1ko&kqu(U7o)VOdyVT~gA#KW4?GE)_ZnQav +za{b)A#Kjo<+He(Gq}o^VCY6{=_7#yN_~zL(!a!2LlL){IIX1!fc5jc;!K^fR8y+bc_~Yh@SR@lq_m~0x+@7cUn6;<2Wh$=wc3Tf}y5|SC +z81&!mBSqUhr#9iyoAFQwLXz&`7_Fge6$3O!@l0RG231tcAK%E^Fih9F<|=W)BiCj_ +zf5FAcwp825U7de|LDr9bcWgC7Ml$1%e=Cfys%mg7aiH{CIJT$uwL0as&9t?ZZ{&d$ +z(LSBUXiE%m!670ZQykD)PWW1=oSRd-lIi%sfA^>e7#4c37c7eh--m;ywu1h;po$#< +zE4@MbI2SEeLVswjC>mQ`+j&-j5~rb_vU1dq9sNp%)MdPN9hXwvgY3Z +z_&ZHs#`y;_7-|;4gP|>5`=nX&@6tDJ%IV^)UjX8hgpaY} +zj!O(s#-ep-KtC(}xd_}&TbzNo9zh8P&)2OjRd73YBjEEDweL)>Xag38AKqC<%Uc*ZL2s>Z#SEzGU;2)i=1_CD0?lkzL|d;m +z{~bP9FzP5@*1Pl&-RAVc3T2{Wei*frFVZ}^qn+7}x-iTLerTVC?^QR;hJ`v9;et?` +zf?p-J^ag*HwwGUuxO@WAW(V9e@t)}VEJZ{O;d;Yl8tR%@_Yvecx^wI$5{NkW|i*y +zq-2}q0zb~xdnaB-8L)(KiI!vO-3mu01B2h!CqOzk`zD?=v;KRTpL7qm_t`rBzSZso +zwXKJrIl=$<{1EZ5)30G~tNP^0p5+(Y;b=3I9J6_SntU!=P5Vw1RuhL)d9qw-{w|yc +z+0TcHezWBwi9(4F995AIWCF&eI0|D%(2@b*sH1$q4m3jHv1}?H4`~uuOdYTzyvyLp +zSn%MmoIn)vVoD60MSy98#-r6;(vBpERf;u*n;j+-?8x}<-c2=Tzk*{NdVZXBw=qK2In>O-;i%m@f9Ly*0N +zJ^?um@RL$Nr5eG3E%S^ +z^7>T2Q4n5SEn_UM~)Vp7JJ0x<*ed_Usoz)Y{(b%rf8|GX^*h_QjWV{~5h9d?vnPiw09=Sr=7bQY +z?u3_+*PWdjO>S*2)&EuJ(#-~1+wQ#$pt;XXZDnFAj=b|Z#zcHf;k9WwcT+S6CbD_WnA1O^(a%zmo>(js1eg`A-B{O<$gn@aZs$bS+< +zHe!vM-_WnbJDW-TcqC{RefJ^0R!hEdV>x<{2^I!jO49oW2{g9jnYk~Y%Q5<+Az}AF +z6@L9Trw`JdU)y5e=dBe@ppo}ODxx9cl~|L9mC2v0ns^H?@7raenxQo`^M%0vo0Fmt +zc74>7H%8>!lX;R*1?%ri=M+s82>dTa(GZazc+i_7C7c6vhqo1w>d_Kwg{{G;x +z+q0;m&my1-`zfIhbD&r>6G3RS|2-&{r=s~DBsMQ-l1#lFg&)FK5DGRUpE@el|A{$x +zS8o136$}6IInmJU4XvIs`N06ghspxG*f;#;D*V}%C +zO%J4~|0G1liy&Xj0omOf+dzADj5=tqo@dBy1=u5V=Gc;F^qD*q +z`Z$)pMvgh98*$o*6~slH(+yj9qGAMeE!_y0Wh#?`J|>{K>%<(&!mu~k?8NL4VI}L1 +zA?pVa5lLZ!BV&geMRpT{-;#l3MS9>Q5H!j5RAk5+vB&~iAeTYKMRlw&tVr;)F3}t^ +z*#;Yjm?z&>1h1Hw6ag*1FTn=^6>umSr1)$>7y#NyAbTw>M8NXLQdY>1*;oUY_?fnZ +z9YDNi8z;;N2%iL&ZQ~d&bQ5H<3j)d{a+pX~9hv5=3)oO%p7tfX^d&8FIP3kvjms!` +z4b-JoS!p1aGD>mOiE{Ez{DF+=He&VkQ@adVLUv+HTO!+t{1F`F)=eqRLMRIpcsdDn +ziXpv|F%FYl=hY-m< +zBaX7+oHAfUJ*3B(T1^S-yH+uOEgqNpq>A={oVD05u{9?_c`D_yPqcbC=js~y4uITE +zGy#Jj=0rGZAUz=`A!#ZlWsSVqHN~Wl8s!2=WM_R;6z#yVBL~3z6-&Y>HB2c(Q7_MFfa_ItZ)quc_`_JVK5K!qO1Ubj$kDz{W8S;juy((sozh=G{{pi*mDV(~?x +zw%LxZc}m8Gd~*Knwmio*@DuVSI2k%h8QTW9SK8B&@e~WCJOz`G-?S$gkJQyD=GYdS +znPfLthqcC)R8^M5lZraS8OQZX+mSrVKt|nMMzRt;*qHUComR0{dW2u>wG4F-lrnYm +zIJPBv)3HC=gB|C~+EDB< +zd$Hn#YM!G~#ooafCbkl~9Ada$7G_cmv{#s;3&ByZww#gzG~;-7A(s*}H(7Rqor9|> +z?r|02Ru&wu7Gb+p&$e@0`m)%hvQR}vQBAV{IyqpyOlmzg#DQ`cRp+Nu7dMzs9ju91 +zC&yL$zeUMqE+FO!8K(z|+D)h~HOXf<)%9~+<8^YkQ`MsG&mGa@N`)QH9O?CXibGxT +zH1P3e(#tr6P%r-gBhw8JD_x0qCY5sx>tBT@1Sn--BFef5rU{BCqpzHcM8 
+zzx;c4Jukc#R+Uk?&=kdPv^9h#Ib~X0<$^Jv(5aATZYAC163K=1!bNVhN!Sj*j>kC-`HtNEg!7zH*C3pxPH=5WXtv>tE;i#3_<61NQ-+^Ndm>e +zrzVb6t8A3+5T9a)V%!_5zJD`p2w&%D+!3YHP`B}w97X90UGIFcy{?knwtuz|wEssE +z_y;P*^|9Ao<5LYt1;>>#J66gi?5ayAc#AHLuWOnadd+@{jcJ3tGFn*lWbMRSt@8G2 +zD?_NzK6WlE^6nc60kUn_t=?=^$Rr6b^!@p`TBsM-d|`!6a4gi?5r)Lj27p>w_k_Px +z>fox)@8gBp#v&)U%e|=3-y8C3rN>4(xYvN6!t+OkbJD(&Nh$)b#Lz#! +zRiOKQrt}R@xA=8bzRv9kMfZDd==7I6_PaVK2YCd05W~HyAU>g +z0HtLZWDQ)-Te|#S;Ytza@*d)qRfEAvG-S1y_beZsA3}< +zooI0M?CXxv6_3$jE(u!T{ljCl7CS0+83O-4f;ckrJK1wf%>VXQd2KIG-QG*)6_<8; +zJv36dr2E8kThaynw-0xnxv*nYZLELv!rmhw?eR1|bHP;Ng8B6^)1)!;?_-8XE}Rwf +z^4#kA|9xBRzl!;2|2O3MzZUZ3hb=Bt@?y6wR+VM?E>>4nC_fcTj36RpPe*JNOGwoG +zkYgxY_@@CXs&)iXWA}*jme)-#foKDD>ID;NM*vug27*+MvJ%!$7P5&LXuah_`fMF? +zR;%$SN=o(kA)`lw+9Qe+T7*Tc~4nt4nr1zdmSoWrrm!0NyAZRZA+hlh^Egb9v0e +zYLD%vC{P)P75~tIJkbHGeUh? +zLBw`{f;C2qAhw*kV*Uop;SwaHga5SA->GVL>z^%?PKOj8kMzJ0Z>oMoe$=Xy4gFK- +z-mPEJcJPfvsYo7)ISH`iTqk*OQPoY412iRPCAy`hMqS})6M&(Ug@iU&4dd$sm-5F( +zIthi?i_Vf(*|%9b<4?wRrgE;blEr)l)=i$2F0sdesvq|WcEguzsD9%pJb3hue$5tR +z<>gtKzxTd)9ov{Jx5Pl*=n)Z%XPfDL9EM49^OnZ3s9nV21bQ?9tu5U#n(

    1i4B1 +z>+6I2A-g1#SG$$w`u8J^z!G7et-Lm0QhGVn4;)gHDC|RGie1Bk6h?=r9@kRfTw-9R1!juyh#2*^lYx9p1{S;o^8J7V2+;)&uQ4cvKxQoc1cZzn>LOP9liR?Pf3Y-?nQ9FbbWd%ZyUECSIVlpM6 +zjb*6{HOj@ojR;jpk9sB%qna%$h1A@ezwHkCNFd|IX*Nq{*#bdCS}OeT8IQ?+NsC!> +zA9CPIAgo^~P$7grHeD0dif{IN%Cy{tC$gU<{iC^W(sGwmUlVQNp4jt?x6x0xr3Mmb +zWv;j@9k_zsugNM=k0<~IL7}i{V{z0yCV=SWp=_4{=|e~AthGP^P~fac1OS(@7P3_c +z`;qMITrYjCl>Pq2VyC!(iuR~@#D?|@*pkb2n^#v;cDQs)o;I@9g;dS_*AK`aE)g;9 +zC$QDPhN_0-?mafYrPM#2J(j;W%rJz;#N&0IxS7e`z4x!iT^YWh_-3`wT=E7d%It#X +z3gxc#{l-*U{wu9rYrRJ@uO<}@$EuAoFiSl7KkT?7o9^(?w~*0FcISaTDzg}pOHA$r +z=mKh6+dzXt=$yHq!Jait88J3*rE_k3;_dr@WJnG|Bys!l+Nkd>C?0-~WCE(6V{P^o +zNqkSVaaX=~%_byY|H=Gb9APBq-CnHG6OyH^$@pb2v1+9xpHqK^%V(So&G}DL@s`Nn +z`ksHAiU+@kZgwpCA`kEarPQNgt(dJAaFF3l{tWgs(JyP^O{c+wxaE(=w?TO`)4QC6 +zZ9Qu5elCzr7_@aZCNcNz$a}Js$-tb&zmMEZQ?|~A-|8{HthL(lY+hki*!}jM&z-Nc +zGp%2=#=DkD;t|qPY(eWA`uq5UwCQh;j9P8+&?viZ8MO^i6FvMK)I>ujJv7#koVO{t +zUth{G0-3)d&*M1_Vz=JW^PfsozR%li0%^_irwT(g5kuRXhIS2l! +zSNtv)C^w`C-}TmP0)foOL+XdlELvR=d74*< +zkXZfx-VXFgf5fjAwaqSb+Kd}N;GwQ0&A&wmbteuJ0A()o6$O9$!PWXty`RkHuckyi +zJa(Qu_Ffgv&X5U)OkYp-Fc*=vL@f%(V|F#;BG=EsU8=~{L>%*Oi122w=Bt7bg +zd{5u#y2t+U?bXY(8~dH<16gzk4geX;dqau_AbK~8X5}5)qljT>M14{W8_=PPQi6@i +zbW2-CP-Teezh`*xks#y%9X?=&rzDHwn*~tVRCP{RB@ofm7NJBzfA6E=TmWc4ZOS?N +ztZfL=C<4^eSR0Z5$ci}14>ct*I-SEz1&q!K2BDvdGJ-{p!gNL*L50tL4bh!QO1Dn* +z3(Wt0^=x^)gND~L8H(GDVkGIp$ICE10LAw;tQK#YTN^95jW%E#L%>D% +zAjq*BNQ$9u^g7{RYzG^>Xh`)oig4k__Bw}BjKTs}Ie<<~Yb7U)6w}(q_OFWRQKVI5 +zQ=;do;gA>thts)2Y03`qCDSpJ3Fj2!oDoPjEKCLeE4{;w1iaz4lqe^i#B&G+3mjg< +z#*9|R`}fCJC`J)n$fcA}s&jl0lD^^14#Hwyazf!q7Gg5lc_nUK5Oort{2S~L=Vhhw +zu@qu#B#DN^avEF`Dx5i{RSB(gw1oM{^}wjl*)cmbQjU5j6FBkp&g>^H)a#SL_?#4M +zEon>;jhqVkwT=Xvf&Dc1Bsm4eAre{qfTmQAZd$E3S$ZwW1j!1Xr!Gt; +zJu-~2LZ%>HGMAmNMUawOISK2PEX)G+=mPK-0S%g>9Otp*d{U2)GyUfyk_=&elTNJ< +zVK!KnFTRPf&f)+lWJaGb|b$D6d#aURO%et}d}%i=vus@Sl(k&o-~oyJJIAqt{87i277ImZP6-@T*j~5+S!GaSIh9dv(=tD +z^;YXy9toijT(gY@O;yun1lA1bK!?{hnLNge0n&MujO|H$zE`E7#ZL2o%=q8ho3_;#(n;HG~-owXf2j1P}0| +z;_Snd0^#51WwSx}!*B3xs|ce26LWR0tBnDQH{fVGyMtb!o0^bNHabnIq-KatQ#xu3 +zyWUh7ipM3nH=CM@Y|l%y*8B;7!oO0dR+w8H8qljnZw5;TH>x{$;Wq}wbMKi_dJ-z? +z{W{ac6IrfVEe@yyhFc8P7jQk^6fB_SYK;kD*qpX?DZ5gnr{Tt)b-5yqI~D +zXx(A99W7MiTfL%biKrQM>u}>2Wnn6adWcM2Bf0$FsAo1zJF)D00$F7d*t-<+&ZPdB +zFV)dM`pDz34e`6^yw*Q<e`gxGBLUb>-?f@G_H+_ZfeGMB%Hz_*(4_*766~*m&@btPg8#QQCUnz=!}5{vihSh^d`oB!WLKYJAG$g%GAkn6*GjXfWXLHx&NSO{(KU +zYm>LW;K3@=+zk_JvEy4y>(w=7$u+%?bBo#qfS~|iKS0!T1&KO<3Hf+ +zz}2%})d`O#r21n>EOuY|H>|1AoyNQRL~WkE$WE{zruuz)Ddb)c;RQpYY5w_ME9uGO +zF`IQr(7O`58=_grV_9$a-FJNa;{IRAfiW>LogE)~RMzkNe|d6LYK(wRb=l}{wRo-# +zjn!NCI&AQiWBQgMiNk(vBRqfna7ap>fDL_-J=2*ik&OMgdT>GewvCcHAt?NHRmyft +z1OJF+W7#AAd-@itC!PW-#-E473~|{fpl@3$pAtkY{DooWVV|KWc(eM2e6e$*;`zh={8fc~W=!RfXPPBpMay-V&PWUZS9IfRw=xZJlz3orHOa*WgY +zmXQ@%k$Yd*H)ZTJ5YB#9&`#b#sAm9fOCUjh@_ExP>yk}x0yP#j0%5k8ZtqzkLE546 +z|6%XFyPEF*ea$pdfIuipRU@DjQLzC6Y5=8L@V0;j4Hg0j7+M+uk`PGfp-4iJrqYD~ +zN>@UWUShcg6g8kyEvQJbalYvNJ7@26W}TTevuCeavo=qF#UHT3^?ALo%d$5LuX_8- +zumwxV4$>@RbHhQ1YNPn1M3FR#(FSs()C2)h_x4SvK+9IbDEMcw_l(3=2_mUlrIQEx +zXg>A=%ZPI{Y9?&#WNNhQ9;l*%LcYwN(iobJR3Sa|1VlqxM9B^-(=o7`#p}mB9!u#? 
+zGhwPW0&1scn}NP4r|1NgqUXa=mr4^3U%_oqRA^&|xM^(RYHRIoZ^j%fzch*&)!cY$ +z2a>{+J~cZGrsP180>30XN!Cmo1)`9tm{@dBWZ;<3_4y&Yge>@4Td;2b{ERnO9)#Betpkss`<$9g7Dw +z>|dqXVZ0IGmap2=097l-F^3 +zouc$wh9$y8?2H<#-`beof=(1XSp5WpAQoN~DsHpYWEsUcdbEJ^jUFhDh-1`8Sg2?* +z+Wst^NfAb9F1J}~OsVQzG0{puvsoAc7i3;m|DQ#Bj{FbAvyvc7Z2prV +z8y7~dB*+p>+w^QIqMUm#z;7--SmRp}yMqo>XUo(rJ|EooX}o%817yW8S3`H4fp4uf5f$w5)jHTB>B(P!Dc>iO +zZ!nOU1XG1y=vjqW(Yn8c`w5)`FKsyHqUpLe71E{njXKN&j#&1cQ`}n-0|V +z2gpHmVPneMTq$oNqK?Pb6gE?De3 +z)9Aiig{}||@;Lrm)?+v7XQA3WwS&+sA%FPN`rUP$IP{~R|7)q~mFt}8tA>ev`vMG+ +z=jPnb-+OiBA>_$x3;8Qse?A`d{hl%4x?l-hmCgDFWA^8M6sdgIhm^}dS+Kx}?SA#_ +ziHG}RaJOx@F{NnO2$aj-8TXI(mO)Rpe6P>enj2fbH}@d)+t`hJMAP*EUE|aC}{#<4;LiYcQ>hKYv1w_&V +z+t3+Z(&S+9v~CE{D61yZF_Wa8K?c>CvUQTQI2ioRndPJ+yX+hu))BOwOG4<8%v=H< +z3{uUwp??-T^?w#S66Po8;Cp<;ZjUgc3flw0Ff#?ghJv?vgvoMg-)0$Rl<;Z)7!kHyDk~%vXq*EASS|G3duam4z3n3ChgsC2C3PP@oxE-geaqzj8M|BZCwV84-$ +zB;->;WFZ3akP!CMIoei*bIB!&WXCoTaN24E$_4ZZ-B1XRB{rphAx9f{z?g%#HEo7S$A|pYxxNm +zCb(1lsCr$x+az5eNXP&pWBkZRrU?eB$Yv0vk00=d>U{k}b!Pn_$mph_$T_{B4+P#V +zV4)*S$N*MDIk94a>}@V>6d6~!JQQv=DUZ?zs$xl-im3M{q73Yk@@(VMJ>u*NskrJ4|~n`mkuUri1&pGtyrK`6UOEsx-V +z!YCp!v6_>DKt%?dLhtIi%nU(oB=INo<0y4Ww6rAII{KFh*w=|Tg|us?U93V*oY;Lxg5o?#sOph*$?57pV?Olm|#L85qHIE*O$lxo`)v0W_F~H3Gy)Gsr4|-s&PBya +z>nDqA)2cbayrQTQD*uf>^2@|Ev0XsSQUK-@&&)2G8x`G$ME*(3!zI~aD%t%e+1I*K +z<%SZJPG^Rz$eMKJJerKi8BX3BB?YJPl+99Be~Ws(6p^%)G(qKwL79F95Fjo8-4m{( +zPXB6}hT9#ydFi@|Zl-uLkIISY(2s+hPID!2?iaC;)6v(9SmE$mHR+9chZxc +zF_8=O<)3(7--bXRF`!x3Cq1*fH8+_4K8m!hIJg3tvo$I&-6gxaBX4p!>K4_ca8uOv +z(z;}0FNSGxTD1vu$uqrBU$C8D(5RL_>3I`A8PMFBr9jP$tfvNd)1uWgXQ;OxA&W{m +znaTB;u<7L3;wZm5>PrdwAxc>RB*=>99X9~^7T+M&QwgeBZ1uu(3t0$MvD{E`m2+g3 +zT1h(c_Ho_YiS@HF4S+oT)td?~4`K4~bL)?c4=ayKQQ +zgZD(8+^%wKoB-?jmK2LBEK1MS^GpCeCov`p#%a8f=)fJoE=+jZ;#1;!d-k>Zyxk7v +z&<1J%l}9k3?R3c2$RNwuN3JucEOgVBXc>S(T1F%6s{w8EvU#vVOklAX@Q`2I+Z^9u +z4638uSkpsWuNS($Aqm!%F58tHnqE1tn^DL|Fmv)3pJh7lu7V&V%Zu(}Yb$@zs>T;8 +z{W@f>J3-$?$JCpGt)HhJ=%Gy-6yC18<2iL#fMAn6Y7|WK^9pX6nO0^vS6-u5o+C## +z(0KO@X!bochB;-k5d?ra(~C+GTx5$V?wLcm-%QXQvtU4>c$+G1FkO!1)!8)E9UrV4 +zCzR+G*ZA6&`REr<7{pypr$>AX11=w{0f)1b6aaHvz6Z%SB<&6k^q!2AOqY*!3-ipw +zT4idgii^i;8b2W#HhI+#K7(`^fQoyBr9Gu5mrWplIYA$#F*PO;JA_oJ;M{u?x9ie` +zd+qOiv@14@xn~+I)SW8$EW?_cDIW-K8k@MErE#-yH>6a|PTr*aY!KQsgKjPfxX>fK +z6ho@r9ek!Pw`rDl=iO{h0Zg;3>^pgCias+V09&+>5KUW8$!d#HtGUbr3e;Uf +z=^Y>>y|!6w+I-HrdA#GUPjO3g%?+oAGUku2z5BEJJTb +zf)NDn1RbnV*5h0O7G4*AWSXfw2r$2~?F^k?T-i}yTQyiq)vdL+uRUkN7Ifbr{75_9 +zl^;GR_0kC1GK(w}QQ{|*t0S+3Nf2twyP=c$kol8-PuzW^XNhVYUulUCub22(LcA91 +zy&VT@N$q+(4@q_+VsxSL2mSI4yHd_}y;$9yr|F$B(p`2CrlivI(55@zn^@?5wOF>R +zbfjx2AB0-ySv5$!mFdS|$U!8CCnCxQl~pGrFP{*Dfx0|lwbw;sFF$uLQl!_gEG%5q +z=YNtQDRsXo?0ci>J}}!i(I~w_dt}{vCH;j@-9K9l!14Vb>F!gd1Nf)?^RZr^WW{q9 +z;w-rAYi}{!NE*4uqt*$$26Uy;=E|}4D^$lEC#4m6X@CAD?D)fV=ttY0Uebzl(|PZv +zd;OCBnv0tbT{KU9v^C_y<_9kx?pX79d&uKm@JB0MdFdMDQldco+%eq+d!mbxJnM7bwh3lB~lyH+N_zjGk*7n +zz#aQn`xG`?V|KAIT`0aCwt2%nHiNlJVZURdg@#DPUF&0%XstEE0*=8n4UgI@$k&3p +zG&iIeb!f<&60I<#+8e&si2bw8t1Wm+Eo;4XB$0C8UkdG|c{nE)VnZnutjJ4|MJWbQ +zW^KT>ct%$XDlLbLv7BpRLZvLk2p#m?$QLs-&Zx3$zsd_g)MYzjjFypxvZdUcTDNeO +zbpr}_kT!(dce`5W47)e8ckaQgubI1G4c)Ml@%5>9)r%mgqG}~`C>Ae?)ZI|}V*ceB +z^74DBRbEb@FMDcgABxOcXKzzpZNLE+YclJ2b`Y(dyu~qr1{$n`GO}dV`ip8j-#=Z`#oOLFW9atd +z)4gDmn-BS?rm^~R)*}GvT +zI~9zDZ;n^q{~2R#B|SeIfk#;)2B0}(LJ~_J=7?4lghNzxAqsXPxv!wqHHx!MsOm#7 +zjF^pSTqrRWHp}RIk-Suw0MF@Am)N0pd9y;R+kLr6Rl)P}JEKSq9bx3@8?NZ(JK$1( +zST5)$bnNNVhn&+ekP8CYqG*xRb{lvl*5z8mLk_Su6Y+8tC3#&uyrp@pb+{rZS2b=U +zD~O1fkpZ35Jj{crmnkT&b+N*9gG)hA^5pk5YU?;SF|Z<#%d^5}Xp&xM((N0_c*?)M0;#=B~p1s4X?^#`U?< 
+zU%0DLY%DWq@lkl|EJ&Hfa}1JVXqw>PY;^@iiZ*+%J;B3?Q%sj-6(~9K$0f}&ZpLh^ +zpanWIhkX_;S=!3%4C=g<@(aA?wAA +z6e@m=-E50$v;=MhvsBoKnZmtsW1cd!gl)iIe!8D`H1(IaT)^TBbhHy=1J??o>tTiQ +zCvthyf!K{=>tSgpTV9{rnstsEilvFHLl!wjnzj2Og`mh&uv}>?VYn)Cy`1~Gzp)EK +zC_Ek;4H=SvO@v@&gIuVIjszJmIDls7z|HTB$_PLRkjHFYgYkaNQ=QE9ED0C$M6y~H +zP`A!u7)}gr=x0pk<<0&K+5I3T0fTA&Xj?+&a@l-!ZQpHQa?kUZ-{xyX=rSS)0HOb} +zf*uvEop^-Wq!4OsBmv^`L{?(+pSZlc5i~wWvJ#h9=qIvu>7#OFmsTuE$gw>;ipdbP +z>FpIpS!a$Zm%FrT`c`nx%U)C)5VUK|8)=YMV(#Ja6aScY7PVyEu@T@@ATGaSNfuYz +zVvxTQm#^#o>ttneP4C;y*&CAq&;pNw?4nr7rl2!%B^=Eni@lh*fE$HPH-?G;h +z9XhONp3`p9|0xbYEf8c~r|sd-NitYe!SZBnw{zL&W3nO?)~K%67Pzm<)Ou?;K<&7? +zHbO8!AID?JG1{KX^`!yYI&?2p;sJncNN +z)OSi4R2HKNJFH3@%FcpS^_+zs8lV<$?U&*p@G*Z5u%g+on~_)cNokJw`lpn;zB{FB +z(dV;vq4kQz+e^;$&hRo)M@{4VB$eGGn_v0whG~CUpA8w>y-W$i=DhPofFZGBN=L$) +zufAj}?CFo+n#0e5h&dq-l@3R%V?@>e0yjT?&N>Ztye~OSi!V4kleX|}(th9k^PY1H +zZcV$#E#*0vcD{PeT%<409a#R^kv%q-G{~7gb^cez5%9dbJp6IbNBQPgzupWD)+rp{ +z9I@5~wD{kt4TDVSl2Cj2Va^Nm9NiFEkH7*E;7f^PYgkwsjy!G$N~pD;`EP8 +zXEmA5G>OD<*<~i|b#|1Q!H7*0p*0=s_Y>?iLRa$z=%8EaA>}D- +z!6aSDg;_UAPt>E7?rb0@yODIY&w9O;3!UI+$pdxYcghJpax#SSG+Gu!16|I5YYREw$;+0QyfQ1N*`aArAM8y3pq6rz0o8& +zMm3eD7c6EZFARpLxI|$oQC6mjKB#L?5PaOdck1*VhSba?;ox5m~2FTadiR$V{|qiVu}nAxPHZBPRd*{AM=OWtPEWq-!r1 +zX87vzQ*~o%Y|{zI@OjYPlc3Zmk(_D=GW2z+fWzq*LlKNT38Ms*~l-q%mlhz{CT#5p${+O8J-g&LWt +zA7DU49Ll-w&d71;OpWGbzDMMotx2xXOQ#iPPfW?bQVSWa6Lihy98<}4x94xyW8d}6 +z8>42a>Qfu-X@H3sLIdLzGOD`#|5gdPh)OUs3$kzs8J&pN{uXHUENf>+&IDEZVtSxr +zG<~lFWwrWEX)jVu5&vk^O@|4=lLD~vw}2gt>$q)#h++R9`qa#2`!MI?OwCDVA1d28 +zo#so+^7|H$qaPWbPMcPXv69X@YL*l#9SAWhU>1NKb_bPCr8}ox6Ay9d_O~{v$82_> +zB=2TzU0?`M3Cgt*cO61%rb%_QtX3ooH%${4QTWreG_!!qX8h}B0ih@Xt~qJ-AV*(4 +zjA6!V^&;uj2pXhm_tdXF?+$YFDw+03^@u92`i_%y8K;+iTk}woA1M +zTc-SOBWdes%=N{TJGEIG(rLQiLTrpmqIJVGI`d|{va3xqcP=re3`&(WB09Q*x&%p< +zqp^kK^jl^D(C4AW=~*AAfw>Z3k7CI=Qcjq$q#P-TWtn2s6_Sl?o0(u6%7yM*Qi@?c~Z77ry&&p$Y{u>}H+e3`c*mV>ERpc{Z&eWK+4|TbiTUZFhcY +zXH9;wM}7adJ4GdxDirCP-vfa3`ut)#*pxw>Me$B^d-bWMGAR?M*>UMN3L7$`H-R1q +z()waZOrzT6y2ixl{1gM~j#<8mD!p|9##ol17rTPgO?k_mCG`62j*LLHncFk8@?6K* +zqrCf9>+kR5+(6bu171F3Pk&QyY9$7q+bRO5UdDoS1LI=IQ&* +z8Zp;UG?^=9Kg_FA7Ye@)hpHJiDHxV`OCxs(rSCh7Y_|E`TG`pOmf8pH&!{?EQ#I>Z +zyb$o5$=CSw=$%<;F|vG4sw;Cw;jAjDoAqJ=5-60 +z+29g(Fs{H6KFh#G68tL6nb>{u>ctVSC1WDN5NaT~dPUP4?Cec2Bua~}cv*C`=3gZn +zLOm@y28_Wz^bSY?WN>5W!1BqC$b(mCrJbcG+e3e}GXXSnvhxWXIf(@wnMKIig2a$j +zRyLRZ5E(bPtmkS^*VUJ=kuKj9c9*`p6lvpGZgJTI+fy}isTbc^U>uU32hq +zg<HLo9y_~lqx@NGQQRg6d-*k~hV?u6b8 +zzqGvSq2iv0%C0MMc-89<)$cvjeDQGY&xe4TD9oI)2Lmuf}e`Ij)k>QI+TYf$= +z0(j1z$2&t@9~*}}HogAX?A~LG7mxS;e2fD+UjHd%hoOK0tzUwnprB=tAqe=I1J8j$ +z$YsznO4Cl5(;SXeGC12Q%x&dhv>h_-D)Ju0sP6C@>8!ZXk$|<1*0is@*_Et!toUqK +z<*gpR(Z%je`>OoD4AX$=k*=zOfoxp3^csgdg%5AoB^$VQ-zj=r;B>>`y2IV#ze`-I +zyk2zQEg3Aww??mVtS%j{@_1bA+Eab|c@6P(_jSjbvKI}MFVio2YRX?VF`zPQ57r7t +zTe(V`&h^$-yzby>J7yiMt9;ud*m31$Z(Y^IK#q0H+C%ksrXCj_D>>I!e|Kh3c(Etz +zP($_m=d}SdFZ&v5KE4u#%d9)xSo`U9XY!`={f%{BCjS3hS)a^d{|RpWPs-$f>2v;< +z%c}pQtmkRg>BX;uPIz&)%tD&OudKLn` +z6V%*_24*&>4P2*A^nu<@(J{nG(BOlA(a=o%6;5O%WAt{$U~SQtzxSR_R1 +z*Unli$epb`gdwH8Hbkw%W22E9unJ!u9qce{=|!6~_HUGPVm2cXMj8mXTndQUbBMFq +zxEDhaapcy&!3I{w;)QI)`Gn2np)KV+;+G^g2zWAc;x;&2$LtXmK7(RMh`%<3giM%j(BXpQ091U6e7Mv +zZpjEkv5Z)wgOrKZHpbx_-Ul4_ml+eeSJ+BB$Y*~u`?)ihEA-L9vXK1+d#hkxXMb&y +zI#|nu%?}5EXjbGv%?DPS;c%f#ykOw0%LsHOe!Q^t)soqPUMrOFL+t?thn%LLtC|&J +zL96w=h~c}HG5#AER%j$ZK;F42Ed}Ex7WioF2-IbrDN}`A(2UMYSR=Z?g7L(Sit&eF +zGem2-@lPeRS)B5!fZP!_8D%b(IDeL?Pb>$TR>PI^R4zdRvU7c1fr-uho??pp38pSQ!+DpN5z~e~hWe)7p!#GDkwX;7l +zeBCdH2y&ZtZ<#LmB^cmF$eY;p%zNkB*1H%4O0>>LvXvR@LCwV%;VLL5f(=46N4KJ+ 
+z*V|YcgQTlKwnDs9IQVIQTmTWL<%BGS;&F&0%5RJ;fcm%q=d#WvORJ-^MQO;*<=|PX +zEggDwh~5jxwn~WVH?gTnG+r)U1VVGJ$*tANaVs|*HG0~dfGKTud1iDPrH<)`$XUbC +zL~Bg1UnywBN(rq!)^d=!%!Fxel2NVQuO@lgKYH&m)L6t>pS&s>yT3)*j*;;zQsE(V +zdvkF21)!Wogdk$UkiX7wT1Uv{I+Q-}u3C?}`ewEPV?8+C=zxMRU`51TxV%;n-uklR +zPUS8UoL{S6=WsF}tN< +zkH#aN#xS;TEmwJgBDHlyMNXS>;DfnPl+$|T8C9#NEzdAY0&UG)r~Ml5a^UNA^5DB9 +zOw79+1X1={IWMmClCI-$_v!gcL0_r6NzZUUZN4f8ecQv<@!6yF`8$P&ZhJZRJbPL{ +ze^(fH+neC{{Mque|2>g8d+Hx^@sNlyeR`pO=-!s?)jcmh(10ezWsyQ9P+7mA(l{wq +zPU-78_c?#z-V2{{`dyjxi$F{J>)nqb6B(}nn{Ly)_vK+=O+2_nrNR4mze;Z3;eYy7 +zk{BZaQyJbVj5O(WLj&Zb1X;ngJ$OM8sEyKk`vSxUcz630<)VZS1nb>m!i#Bj#?9vQ})x((&ZR7fZ`FjMf8(i7?gXH@buc{d}Q+YLB_9V#4mGTreWzxjD+{$inr$z6f7L +z&RWMmLK|;@ryvsI7;nj0;eIL!1 +zk#-+`_GLrB0p#6tN_zuZnnHiBEeLt;)3g+Z*!TK82ma%3sI-KC4%1dZFW||l*iyQ5 +z8OKe?I=Uonz*s_r*g3Df^Zq3cDbgk1>IjAmg)MOWT5G~in^1fTVF(ux1CX-iQegB~z3g7NM>Bd=wOhH9^(dr3t1-63i-5S5W0EqVlNH_eI$qThl1v6*i4~p +zz(gd_i@&VPz00L{5yRDUqo~P%*Mx*$l9(|LNF5a+yBi9ee0>Hf +zD~O?wYS1hmw%mMfMThVQAt7-RdZ>pWOu;0{R`38jViryD(XZLoe)LetCqBPf!bXg +zhe8RMr}!sXpjICX +znYa$jVSofv&!Zg%08)^?KUO9rrUqgGuu@! +z&#(YY_lyQwRd(x%60ubLaF{nJp+F0eK?034kTK2iF9=V1piEdqyPD}n8 +zNIG&U|G0YUNdteKViI;Yk&`Ac(od#fgO@x5be09T22KT?>5TE~ii3U=*beh7o<;2Q +z3<~awd7^gnnsm(VE;2Da$fDq;yJx|C>a{Z{WymSt`!$7z1d7MAh|>a=_q1Q~>Fc?t +zvo3b~U#@3o6!Yuqul|DHq!X@>cknJzZtgi9>_rK#HzgVZ!E#*OQ#)S!w*s}A?CSbk +zhMc_N+B~{RT9IyvA-~8CbzS##>FJsht>L`YU8SU{@iX~ +zh{B1A_8*D%W1dbqG8}Zcm}G;wWxPC?Vf3t0exZ`7R&~gLreS(*d9dU_Q5CcRe0N$1 +z{3-8TBEA$SB;lj?`LXR?LJ_(JjQX31QzR?th#S#DQ^$zC_2CD*$wza-sOi@~udW^^ +z+<|mdAIoEWtcdd?6`6gh89564sB&)t* +zaWF2>z9G-E{Gn$fr67M~dGa2`w)`Za0iZAkdxHF<3cfrG2e8Y{Ga-WnWd~;{oF~5n +z*U+98m#J31Un94Pc5A+~;^ajA&f%uLoO_TV$em3fbP4Iv^LqzlM8<@Q9u${h1TDc( +zzXU6f854jlj22yBtWxoXDl{ZCe~GG4T@W4c%)4CEv@@z@kx6>S} +zH6t(QT3pEJ?OrqARc>+NhGCBesr#R8hDPa{gT3`{}+47K76nK>&#Hql~?`u8WuiET4R>i9l77Q^kwpK$@zi%_r5R8zV69B(scjl +z_r)(WuLhc$mX|?rJqZK0+C#!bZY-3rWQ`{!;TRjeQMU3)kI@KK_rlRgP158j2OFt3 +z#??vn7>m-&FC2?DtezZ;F>2R)9lP_X$Ll!L@xs^f7W0#@6L4_gQ|tcKo^KNEHWs}} +zI%qugCfUhG|82_glb&y{xwsd-O+8DRddtU0>W>R9rFxF1dE^(3r+ZgVjb{+s^(Qg| +zpL$MQr;Ha(Wc@F(^Z$3)%gnnfvCXFUcb=TQ^8W70xa4?qg^|Z`=Hs#9oA>JolpHj@ +zkQEQ=&lD=y>W#-XV5D+g{2bA`cGzx3Im7kQ8uxmEz2KJGTa>C-AO +zAJAPeqf&JgCF$N$q0;8&)yl9lasWTVng0;1pqMh~NzHkz`7bGX$QHb{CSzBE0XGi} +z4}58+D1zaS=E3C!*03ukvAc;_1TZ7{ik-cWb}s|V4Yibum`q5@0K4!CLtpVziK*s# +zDGb9+maw>KmmOa2`(w9f%TB7S^T5!iPwB$=(_9qhTQMB?uTycG3CGbz;#@^xC>Io( +z=-iaRd=^iM9x&_ITSP?+to5v=+wD;gSNE&t@Z^Zri5>n*pg} +z4xkM6I~6UkuxQnjn(4wQ9cjh*bdks+Qi^fBpLy-FH*URGF6Tms+iGK%n;PaWDA|-7 +zE<1->(3Yoc6u3Dmt9GWE7mN2^Huw~~43(h;3YQ}=rXr;;vUy+nkrC?3$){w55cpw`ly|R^p!xX-TGI`D%uGh}V6Bu! 
+z;4G9cr5Mo4U?99PEX{X{r@v0xXs|F46pksscCql~(~5Y9UM<9@yw&DW!wUm+fa8V)!hP(?C +zbx;_-Y7WPJvEe_2y=3S9ch8JCt}L*?(RE7eb^TCZ8L1{l7Ot6FFF|h&taCi~f#%#e +zdEJ*fnDJt+p4|BE^2gv;8n3?g58hj0FAp+aeg432`nhZCp8?Co_+x(tEc*kozXvQE +z500*^VU+el)ldGpb6R)5WmOh)R54=+M8HaMoqNYN#g4TZ-8V9FIrv)cJXm#G|F&qR +z(`$7A&L}?p6t!#Q;<_7_oz@$rc0`X{z+#u$!a?l`X_^-__tmu#RHd$2{g%31KqWk8@lNsY(0JaH{Lnka +z`>uOP@c?yk=p6%h^t4m9`e5kGzyfNBhsBKA5M%XDX(uC=!lJcw>LgAU$vc++L>uJw +zv_;olB(p)%yiOvwlB@D1>S|-C7zV&zK9CxMr3>@`_7WnkjRnCz^phLDf+4U)?U!HV +zD78oYqV*@gOWw)ZU_o1m%xs7pGNBeE5zR(EHpd+1)!w%hL0D%jB%j(L_jkSY#UL+$ +zRxK}^)&a*tNfYMs>UMSA4??8TwHB{7<4xDjir*;^EuVo4JATV$O2?F11hu^cxev$i +zKgQp|!HsF>KJctVUTj=8`Mcu~G5owM2#bu=1bSBKVIeQ_fGrjo}CDd*2#2zv8|w?HhDd#Nd`Od#n)IL5D3To76>;hftYgOo368?k?{6U +zAN=D7TxVe_OT%B|m%^%kd^CFXy#okd1!k}LbSt3-YkI^V2$6pY)HRjyIO4Bs*f_U5 +z|NWJ|9KD6`W9F}ppS5ue4$3YUv1*UB)L}7nUoY+jKjG)-ZQn`BCknl>zV9hLj> +zc-a-&rPrj%`m|8g@p9dQ&9C?OKF%I{cW#|Cg86X@C}+uMFJo&$&JTvHHUSpyP+e@` +zfl=rQBkB->F=q^7VMF~9%oBvLu$r*9x^#jq{h5kvV}akSJ986RI*%Xr%Z7EQ1Y5?sf5TgVPw5pzO1Ev$E<5G{7t*{6OR<1ip%Q+6g1NIcbj*ZR +zL0}!^5`X3gRZIk(pJ2Tu1ON(Tn%Ac)X5e3lc5FbR8G +z6HJ(3??gs8^I>8TJ)KO43fNZ&WSZdW>t*Li=%D`uBI-~9xNITl7*94uFF0<}7tq*1 +zerLkJD*eucI{`Bn&O&4ukCS0uN4v?&L-g5HS_HQRn%Hiz<;dc2Ub4nRc~Nj*FZo +zLd>#rC{q;Lz>U{Ml39}2J~592}wCXhl5RPtI$K?Dt*MWRoItxpTP +z$&W)&gW63aW|swFLr5A`FU$rPZ%84j>QlO~k=v0eiXMqB1(Cz5p-r{%0uI7n3|+Z5 +zasne6hzt){s37dVGq=Jv)>$_qr-o;SN_n;z^c9)13l%h~8f>!^MD9$+pva0*@rFF6 +z6N+XpL9Y^r1F+>bAO$8yP^m#b;9R;VMY$#_gB-pU<^RiwcfBSR9z`=bmCC0CmDuse +z$oz`Jl$m@%T>%(y`}{dKayJ4;>ZecJq$}&EI&os^YN%y)ytVd;_X|^X?8EO+11_Zn +zyLSP1L1{vL(1=~Q4>iD_$J?nF$zaHI{+7s;p=g#%L6x3tmP>}+ZYFnGjhtwIb*l4v +za9VKllz*#zVnSM2gC`q;%8(k4i{z)Zp%PXYi!lARefT*_koBqb$Uz9+KHNTvez54e +zD~kN$6zLT;pn59H7m-Fu%hX1t%-H+C_hh^8UZE?%az}gC$HpU+u#kGJHqFU|U$?-U +zL!}R^<~W(;D68?qRkOP4a_TtD*F{8iT8tP)gXn>F@X}8LDR|q&zA1loV$S7LIUQnwj|vLvG0)aI%W=3aKr +zgbk;yv%ghtnt1RTYtcS&hM!4uNZg~A80y8|NeeJu_F|tg$TjQs1)$n-l=9C`ss3tG +zpn)H$o)n&*VWN`lV3&56kT$IY2jdm2 +zCj*>T4I$I&X@u@v+^L&Ry2UZ7$tHY3Q=Q;FCH4~~qRhbWR7dIpDkYpos;MV^nettM +zioyhgQ_#Qgd9R#H)%p3CIi=SNO6lY*Id!C~L*jIIQAaeFh$z3;SxQ|fZLBL&7%ZYs +zlx^uK?;gsS$zSnsKT`coT8+SyW>>q2v#3hHUmG|Pyw6S+K5u(B- +z@*=wnHp`UYy6)&8;_n#Tdb_|=RxNp^A7d4D*T_6FQ9aS^4Ef|H5WrRBWCZ)Sh4`73 +z?i{4Cqf`G@_X`Y8D=g;i_RI|4RCApJ3YDQ9a`2zjjlXJsYfisDs009SMY`gu{b1@I +ztdw$3b?IQ8)8K77C$nj>;)-*G!$Ss@vnA5q%UL`ost9e~l +zdxmfL6v@97)ru!5Hp`ZQyKvHdPXj;NWh1|bNPjOMQop@(5umPhUAvP-U4mz3RfynB +z(;5FcncMH-D~py`#C=;oAD{p(FoS#)OHIZ!=GQdcLkO!pn?7)x%G2)~+2<=-6et^3 +zp=0AQntod}Zcaa4xiQi!mBQJUIWXN3-l!uO8NFEV>W$E?LhrUn4<(7$&R)WHkY`qE +zYrs5&`LwmV-P6*rjh#v;cW*O}ZFMjtL_KZW^P@F!^@9L+&jV*47zmN}hHdRO4|aOD +zvLhdmt6P(aS32DvoX>2{O>LLPQ6aOiFsu)Z;c_GwFZbR?(Ro(|A8vtr>@Hnhgld+hD{(6Dc?7c|`1_b|eB<+uosM@F6ZXOg6Kc1HgF` +zU@QhIRg0MeD?6BxfnPrcKx_U(*~{Ml2kfQszve9ebzt;=?ZD`Np0oTrq%!1Q^1gb6 +z^!?nPDQ^%|8w|rm8NW4@_*ji;Ua(!I+FOZOb^P5n8F-Y70uDYSeLP{47fhjRgWM3Y +zb>kN0$}aQ)LqC-2i6L-&&6p9>5+bJzv4n2ueJJWc_a3!^V!SK&Z>@0|jjvXwGr<~8 +zI0+aaEEm4?nZ{tNbn~5$ZIT~Y~h7#JHuGjbLk!y#P+SJ?OPT}YP)vJxj0Iq*e +zcWZt)QzP9mI~c03>1Mm9_4LEGjSqvb(oujxI9@n9QprXXm%Q4&c>Ionq15WRF)7%5 +zauy=W%A=lK%9epEl_L-D2j;?9>zS$Dl^jA1c)w%WW+pXaBi`OvAfx`H;Wq3L; +z-WWao#84M*bBPu_kXIITxZyw$l%#o-u|Zyz2ug@IY}T~4EH!)riZE!?T8zh8U3kbq +z+#9;MCC(JF2B1I>rrS5~2iLi=h{=E?2A;4ABx{(Pc4X&U%`HczplcoFpUP(LfKGz- +z@C=0>#n$;nCx}k$V@if$+gig}+soTpFy6vuGrZa|?wFNCu?Bg%${1s0GJ?O9#wI|Y +zjz*ple?GMlzkiigPLx&byX$^WGBx^l-q&so&m7w-NbC%7%7yld> +zRb0RA3mh0dgDli`-170`2cLOkR#Vd(SLT=f{p{3se)W@}vY;CouJ3(oYF`}qKyKDJ +z_wp|{L?9Mw?&0*0tHlaoz=$zF>$d8q5hHY==hd%o{3ZgW-2i@20?`-}IoCqefAvV< 
+zfUox^zMZLSF$#!zn^ix2RAX3<$|)^Mi#mnebr#a{!Hf3?RJTP!y~mWcx;#Md4F~It +zj7dYm?Q(mem=9+z!ggpkng05yCKkVjyaKDNn#BE{sPM1`zZWXAhylB#aoQ8~upUWC;oOFm8?%yh|%ztiyT6OTo<1D!y +z(yMbB2NO>BMcD)ucUhqAnojo_fZB9p*GoADV&O0&&ar6)hW-^d_zoP$q#g?b>x?S@ +z;sWZJL+`yDzXOh5tE%5MC=DQ}Ep_MiA>S)8=F-^N>iWMobHT9x;9|6CySYpgpdyu+!D@yLf$gxsc+ZLc +zXv;XrrKoy@Hw+=oox;r5%WpG{wRMiCxG*aD@ytRJ(TKx0iQ#rY#G<%Y9H}#uxc0>; +z8|P3+7xjlb$6bXQ$%#b~;vhxz+mn&nvk~+MN(!^VlR}@p&M~Hh#L3BMzAo3smK&ps +zc?P1)L=2^0@juwD`#fetN<+op%wfOhb`i&&gFw!S8|~7tQBvHfxI*WYqd>x) +z<>}8x1)uJs&w0ci^YmACzV_f0RVFR-5J*{tA(MdS>kqW(11H3~1vZ5-{ +za{1WTBOHn#(xK9w;qRsbxxo1d2nIxeNg4vLy;81C1!fpzWobk_?jY?$l~so^YNtxy +z&KK@R5hpdurl+fD4X!2gCF1!}x)P_2`ToEDxOStPL3@m5J$9;n>jz3(g-7xF!hkzW%@I33ff)Dt1T3uRAx_trjnslvrcnSbhzA7gH +zH{8C3ghq!7NrO5XoBl1IVJsbQ@6aH)zm~S4a-FM +z>mR+RzU0(rp_5F&ap}p3V7)T%{a`3>I-4&!d5?Dr#W7&NW8Q>UwR};_we}XJk&5&4 +zlm@xm=7=VT_Ui>BH);#g$hNh>_0M5+gOYX&)UIWW*s?46=9t=z+KHC0y44P9u@^eR +z_3sCltua$P+7?$^-rm1$Jb#;2bHjf$?vYm$08rp5RhJhcy7HqY?*~s8Mm;es{_AGt +zIs0mm{PjMskekT~bsqxCrf+3*<$`_unf%PF@?lzd{DH=5HE#3KCL+L41V&qr6FbJ} +z9Y%(o40gWNuF@y-u*T_5x%Q3&sP;Uc`X_n?cSpL8uXUb4c9%^1z65o2wnvzW1_R_| +zmxq6&Pt@RaE}p;CgPl60am$M5*el`HsiKdSjS<>OR?8@+P4N|1=?Icnky +zpp*2QLBS(ozq}(~NBFVr%Px!%<~yT#cj6*RVIO^hX1Yj9UH*KjRJEc8-eM@&roM;? +z>s&#VVr#YLuUU$^opndOPDUuLcs7Lk)SJUXb$tlBV2`T4MtAS#xxQLesHbXwLY1dZ +zvA0_l#GtNUbEdx})GJ=KzuvrmkF!Ue)IfV%|54|G*13UG#skNV-OKX&ZmB|#ay;w= +z-PN@r-55~x3gVtQ=sj9;neWn{4ebinm1E|(BdS+S<}P_=;DDv4hd4HI?2@P|J3#;mw5M^wdjPo9u_SBf$zIg}$IT +z4!>2~)f1~%&ow#Lw?chwUGC|)^e0?w7QA@d*tvblsk6?hD|NKT*QsrDw71yl-k8(i +zj5Ax)>3;6msHW4%7<}P|%Tli6v&iwu;_>P3@tK+Nmz(2Ug$J*VAG~#b@V^GBsQwQi +zl|_YrUjiNfw~g{=W9k3iCD4C`P8L75a}#$jcf8H>S?*kD@P>dywVQ_!7a|!JAnnEj +zCp+JJe(9AuKL#06%v=J)51(Jb@d<vx7$;QanMGETu?L&A +zFWXakMGcKv7AVEBMT)u-cBHAv*6kL*1T-A1Ymydf(&g_!y28d=eED0bM@B69 +zHW!EQlTksAuf9C?kk?CvCBi^{FVre+W2Yn4gh#X&v8-M#2mQs9=!A!5>+e#!>iV_9 +zwT;4N7wzLV-t}YlvBhL)WlIpJqi2R@E3O#{%($dVL{L~js$^CI*NCq={-WS=}~ +zw!U-SNEDflKz)%E%EzNbozvqgzF<|fLDw$bh(i>rgZX3hs5{4Ahu#&lKr3)5vO^7E +zJy6^SUZf;tSH5ZD;QfXd0+vVb)bYk-_{?JLY +zy{f`A(@jaESVr9b!oUEX?%7+y47eVa8z1nrkbfUW_tI&MaF3-4_)E2p=okfRY>8;Z +zq2m$Qj12oC9N*pr(^VCrEPFE?`mmJd?yuz``I?SUw1$Xl=T77qg;(+~o~5hSx!Ow% +zn?UTPtSxh}5;B&r8kZ>)YZ5jf{woXWjYnRIx+Ow)b_0ty@`1#TE1tIm@y;dnJw|q=umKW>6hg>Jl) +zDALXX_|aGdKN`0C=^F?Aj$;rPH3(tmBg~AR4EQtOoCr>{f +zD~!nB6I1c#5=t?H>=bg#@eQqC>m(>4LO3X3)|st(l#;Hcx#HN+h((Lx_@#3gu-#^{ +zXXyMC!scUmImiqx97y*23ktVku@sfLFu +z3zqE|*V-?Ghr!qXv{~Y6e^+1RO7oWc|oahA53BL6B#TV9VcYurwp=tMvm)`U{ +zGd;i2$=tQI&Ub0c#F|E^VFqOz`8q#pKQ~V1Tp3i7HMs3U@1SlLq$4P8I`B|2Oy)4FC +zlhydOB5V2RvUK{+Aq}{B+JP}wg$VrtrI(*$c`28-A14hO-G%P?*IMW9Om*yw}!MOwNX257-K#Uz7&pZ##(iUJs_|C +zt$a>f$arys<6+*r^W4fj76^}|07KeS#%X(u$rLp#Rt +zLEOo>GK+e5iAu^NCcITps>%C8g)H9_OrP@MdI)w53p%xKhHy6lUEB{8ZyTGXfH+ME +zsX_n(pc6h$?Ai4<(_0%$?P(vku6v#UHcG!xta@?Z^vp@U_J`vTbn1^6v!Q(;PW^H? +z(FnEjp#AnNHXvf+heQxE@$Q#H3Bf;}zkFuSV!aXq9F>Y!h*ztfOzv{RDW~liyeS5w +z80ZH$DpD~-G5wc;zGh13lf3HnzAkQ>Y~)UH+R8CO2;TNaJ{hEAKbZIJ;z}0NXm*F1 +zo|NEnaC&L^3lnFUo*&*OE)0pXe=)1PwxQvO`{s!- +z6L#HeaPXk*s%N_M;)`FQQ=^AsMKj6)aAb1E21ije2BTlD7$4LZaFgB^;k79xmz88h#0TaI>FM#iA0gF|pP@Tw51pJ>z0yhZ +z_4igd>37$eq;TGpPDa$n(H$be#HcqvPyd{}Lis$Q2;Lozk1Z4*M4VqY`kO5T^ju%P +z8e&I5=c9NCa+L=m3>#g;4u$CB|GGftt@;VcM9#q}A9N!u=^--AKq(UCwFlwM1SyCd +zW=jp^o1iR)^4n`uR@zAD|E!;YkkI*-`u$c*X*cMC%`DxJz_8kO^ +ziMUUy0RbI^?bQ@~hG2~}KA0GrMkEEP`|s(9lhu#Uw2M6NNqEc-gJuLZ@z@E)(o{FK +zaIfF)N!mMcQCsLKG)ZeAK3<0Qx+;*95l+kx*`Xihm>=>Jm@jgNT^f$RkBA<#3-y>! 
+[... base85-encoded GIT binary patch data omitted ...]
+
+literal 0
+HcmV?d00001
+
+diff --git a/aops-web/src/assets/loginPage.png b/aops-web/src/assets/loginPage.png
+index 186f70d49b7a5bd864cbabe4bffe2f71d6e1d15f..a8394e9b2552a70aa2388f81ed4e2e76c019d707 100644
+GIT binary patch
+literal 37699
+[... base85-encoded image data omitted ...]
+
+literal 9353
+[... base85-encoded image data omitted ...]
+
+diff --git a/aops-web/src/assets/vertical-left.png b/aops-web/src/assets/vertical-left.png
+new file mode 100644
+index 0000000000000000000000000000000000000000..23c557e30326b2662e53b20c710fa61cf9a3bd67
+GIT binary patch
+literal 8457
+z6c`2#c?w;TINIk-vH(c%$+XUl0!;twSKI4IrA%{rkkj+wxnRHE9sruq^li#r3EK2^ +z{vIpW-Jz1ArDfg+T4U-MLRa7Kc^%*Gtm1vitY@2mp^|NvPB?IOv0_PKDCoDt!ehYa +z@Ep^9Ok2OnvOpAgj$G>fLn2wfy@B$M*h)2np+{f?GhT7spyS`3o-4oBgd$j(0AO8Q +z2SPTA@DTsWwl#SQ`SXxj4)`g?r>72zXTogh2=wC6#U~-S0r{7sF1HeKjq9J{EySOf>7y7Hji;5kG%zE4AP~v;8dQy#}K$-rMyH0z~-vGgnFBb)%sMJ}T2X|H6z= +z6)w;g@8GqzGO`+gmO~foL9L~4NtSd<$C#JUd^0*Ysf^+H1rJVVGdE{^z_5%VVUotg +zCayR3*XEuVuXV)-$#EMPC6%snS1udc7aG7WucE%{VTARMG+}Ki=q#{RDOC3=^>Z>B +z@O`~|CHf&Nu+p4@ZMe7j>@?`QhgnSK1buD=-zk!kFR4Sm(Ab8BfxNQ3-J*g}-QRXa +zSk&w3&O$e-d&z)o{E;62KeMJ%fG7KNR2M3kUm@b{?~PTOd`Cz;`e7F{{O +zWQqMuRtx+;JaFpMDku7RYF8tUuin=`cGB)^kjcv4=>+Obh+M>c4#m#c@hib#yVWr= +zVwz_J%vTK-(mUWWn&s;(OsP!$w92|&gIa*E*Y=k16^T)=fToq+ELE6n!`gbg|CgQXCj+x3}f`-K!QrG!tARxNfLw +z#|?k7fAa6U4p1V#UrKy<6D$Jh$SYwyu1RT|^v-K&uhaz@#bDDVB7bV)%YWx6xzLlu +zrN-p2%z7s0Wxe?#&pzRXg`6Bt#=z8O74at%vR9?Y=%c}3+rQL-4hQLouR +z&8)-odwI&qdfb1cERyl-(qVfdFPC466_m+&&4q>Gd)!&4uD-nWtQZjfE@5@U=gMz= +z@h%ci2tKh_b`U=rm~4{wtO?LkEbnCDvo>8niNEE4{xGF)Z>)d!sdAHfS{5v4Wq8f^ +zY|wm1VIh`QK#qV@JM_ief@VL6s{qwj+bfPrN?9{CBIT=LgX*G0jPtQj;%_|GUIN_~ +zQ|ts7>==0ObsEqTvbDT)L(yv{Yx3_Nxwx3vo!hobFby<2gdyK1J=V`zKZ64#3rA~j +z!S%S5BYR?)3s{>W$P$+^_BYW!HQGBg@7}~d$1nEa!k7I1mi1b!`E{bwXtfHcDkoRs +z5VN(CXCq(c0AyxV&W|4wa|e+|52pz$M(#)Q;>*6a&DbGiSJDL3B|Z1svo%d*qrCW45 +zKg_CHtt9z4X_L$9s|=|Rnt9?hbrdQKF)Q}rJF}O&A0`%kz@Az0rV~cr^X?bT3ETFU +zRvalV7$5!))vnbCVIMy>S+o~hO3=xzw$`^a;hkYo2d24zj8{DX>^nG2cy%d_JhFZw +zBMM^OSP2aLQjTe7Y^z((26aU-{dN2DC3{`Oy(&na#P<>Se}o +zQU~3{bxAeQg^AekFMjg{4se0;qdiEGySH_H>uL3xVv8{`RujrrM*HAI2g+t=cmktw +zX+O%bUZ+GI(R!qC=$rfK*bm!RJbRoPi9;mI{PZYorMUF +z^=a4Q@r1pPHSYaE>0IwjH7Wb#b|&u_QS^ZCqo&Cx9@+U&+0<*G09slnM^7-Pb(+B+ +zR`Yy9evM;#&p!_ygD)vEMXeL?|24V`V$PJ081gobAAtg=OcA(%Vwfy%-?P?&m20eA +z>MED8;2~_`cz@?@qk&IFE~?{vUXw$JMrKb;dzp=mw{ak;=}v^XCxw82MK>znph^hA +zDzj+Y?$VoOX}fHA3;Fb1nF4BeHx2O|Hg~6zVbPAl{@8(Q5{AEJ$nt7V)v=11-r0@% +zk8Ry&z?9{PzHj>9*>u2iCYlUTfAC|h4v;z9XMqXDM!2#?Js`gp6eoD%Un;$zI{WWx +zz&5_Wk5FO^vca%fJvqbowC@2Wi8`Ew>eKe|} +zIRrLdbJwesh9Gh?mzft~v3c+0u*4q&ryJkP*h`(4ItC?=9SkumPaD8byEY$;)w>uZ +z+WARvoMUzs{k|B`_Iv$OWjz}DmeZo-@?=_vx57NNcT%m?*LKtKHOd~8$31ar1qGtL +zW_wsTNBji00+6N){n9Trl2haRt_p<%?7Pq8&xNbso+R8T=ir8W4Gt*(;d{9p?BohT +zWm_0o925KIG_-RT|IEEUGMumG8tecsw{W=$3}%I|oVeF8J2BOYE(*td)9#X@Z0i4Y +z_1XWdRzKhC4ctk!Be4#lT&2nCE7|}m(+4I<#)dDet)}Ud@hPp=sUwsiODtcyRBbp? 
+zs1rWzK+5|nqn>Zgc_5@2c6L~WN-X}tnqzYc@KZZ0LE+REL_8+2D{4$<@`LX +zt}}@@UU4|7`@*#+fk{f;n=U|CPP`?KL8m+wXFtv?bzGF{U^zq0%!`wyUk?HOr})^L +zT`cCfW0#ADORxYA2X{p}nFx>CqgO4FIs9E9!NC)(O$S!a^FfK<~aJ9HurAt;pXU&iyZY6az?gbLoh` +zqVD#eFB(-cDmwCn% +zLmy*yq|{z{fG^>L8`WO+M|r0FBr9w(p)xgp2XLd+x{USmWK+tgHnUBGJDq?+dM}oH +z1*jr7@EO`M4zfDP_NA1YDAv~;@`I1A +z&CKU~KRs{0*v*55)|2+zp!!Ff^QxaN^Yv@LTbKQ-3pl$T2Eupr)e)Y&rl&txgR}hY +z!FX&Qmx-+)>8K*I4RWO~mOnKe92|-{tVnG>FjWp_01y^o1uPSm|9^fa=) +zvx7~^YqlswO+Y!GC0socQi4;vD7BO0nPoZVmYJE^esPJ^SV1#{${}Ew(}cnId*$4k +z0RaLUFR;sU&z;i#pncR`A&+Zu>I>#ZOkyfdRqwNG?riw?*p2784S_g$dEGFeN_)15 +zn;&7A__d`{honPA@gezjFk=+!NWRlVL0?9upxrUj`_Uu+ZP2MA>x0ikZQoW1qapu( +zPXzZGndi>f(~vp#g2E$(=f{#V*opb>05=9`(-ixjzD=x*AoYcP(^*xGRvS=nS3)A{ +zTd8vXd{5BBJ+{D7K{xHhSf!z%A?usY*9D!pyW>4~7dz>GK%$XXa${;7GMFD@A9#!w +zCukO0-FoQ)<@;8ELBr;yg1z#OAL`7y&7!*d{hd+NPhBMb40zJJ<8}lY0%VJzhA)3aW3&?-rJyiukLf+L`&CjEEz0r$*AM;ybn4$Lbw<{@6hf&i0=8( +zGdXypiB9@L9~XYBqEVtB05w)oLvu0v==*erCMLgO{~nB3@CPoaI-F>_F$so_-4aSv +zxg!+1!2(4O8^Aw}nUf_>DU9}AQ&W%21tK^k!igk0{0)ADOF|t^YUb8eLI;N|pf4yM +z0e=z{-QK-_fAHBG5+Vc{xRbx3m-LY;!mVKZjtR;0brjhTBFtNOVIFp?Wug@YL4ANm +zFvKLp@pM!$?@w4T$ZEQoDJm*rxBVU3eNS{=2cIL918nh1grCp96$+C=i-E`{UNwY+rQ82293Iwc +z0cj%vGO|B;3lO=#{CWv-2`cQ@_Tv=OIX|BXlZY`A_9RLr)C?SSHyhA__cfYSbI@r=ybuuBsa69+sAC?t`dcXP3pcvsKY$=tt4JHfYpM2W9OQ +zfpro~0#oSC9u^6Aw8_zo0UL0`AZbv6<0klQ{B6NZga+B(-Y$9vVEF-;r0UAQ^P8Lm +z{$~B!t6uvHA*z55L5g+5P+4PwiYTd&<$Og$4H;R3sJG6LW~r#Uc9}XPH7iwEy8fr{ +zvyZ*D1WV?j*L;wR(a(Q>@VH7sXG3>H+y2{s@LMz7!jir7JfobKgN8YCq3tDihvHpQ +z+n}Y6DlAiSd`shj+^d#6X@~MuaiB0F%N?+q+0RLCBL0;Mdv$Pth98C~FwZp@blY)% +zf%5Y4@o8Uh`FZKMD9!g1L`(DSuScYg!M%-PhNONiJWtJxeuc1V?K_M@v +zbFkUEfl;+yjR?F9eOCLn(lhcs4=YZY=@y@^HQ!!_D+c{#%eyQ+!+42Ne%U0#yP6DI +zeCe=k*_GOA{kr9N1Ak|Rr#fhA14S|$arc=40&$ES+U~m`dJvSV;IWDkqX^Gv3R!Hj +z)6n4uZ3EwTk#s6o+dF<{->?n={xwE+cpv0C&D9bzb4QzU4byuNP?}EWUg&@OM%wB3HJU$Qg$;)eK^S_jKiibrFrTf;^x}ViQ>gRH}E?F2T83aZyhW0E= +zeLelI3Pc%-&TJ*Vcb!*FOpL!;E7M0HG&m}UM~sU$zOBY}Ac3(zJy0?O%OG#~M*N29Po5vv +z>BU2uH(Wk4Fgk8!NmEhOdFfBScU#eY1&1~0Z2 +zj_A4!A!td(1>uzKcwRS9piG`ozMyzmwj<&43AdRM9I0FKQLKP-a7lukFbHrw;6*rW)xa9myoQr%^qz-3@f0UX&TcLRj-ug63Mdn+i +z)YpGhP3;eE6!VbAIxkLq*uX*Mv5$8U5Oa?Y7@!h@ZxkN+hVHO;4JqeO86NXC-1rjjzQv2)6nVMv|7yL!z?wB;?7xaaoi&>7m_;=^CSpDa09 +zGDj3lF#j}vQa7zghmV*Ni}Y6`UW2^ktMi|o_yFhd{GNP=TA88MU*&P%*S&&%KA}B{ +z5xQ_qHADHKw^x0E1VVxg<9$=i0W?yxRUxC4E(!tJ9&6(>nOd}1`s$042CZaU>x&!vP0Vey7({Z#x$gA9?WLb?ic;9- +zfYH7eK?e909E!P;TFba|4Q&+>YHeRFh-D~zmqZFce;R?Irs$5mfubIGKG^8+hegn2qld7GX8B!_a5G1-9-x1~I?sE@g}@r`zc^A!pmy^HyZ +z*+iF(Aizz^ohi}^T6hf5e7l)1*U9Puk3cK*OzOjv8$ +zpjy8`T#$>^c?JHx<1fK9Cf9`?WDIlS_Cj%bU70$YPCOIRuK;Cv6S@Ff@THK9%gj9i&hN23Z3Ql?hsv(xJUysPZ;ab&V& +zs>~hGxa!~pDZ@%QjjA@Jw2W87Cox+*@-0U861Zp7o!fm_o&|kGbj2eiGHkWILof*+ +ZWkSDu^x1%qtN!;5K}A6wQVX^S`+s=^3GM&@ + +literal 0 +HcmV?d00001 + +diff --git a/aops-web/src/assets/vertical-left.svg b/aops-web/src/assets/vertical-left.svg +new file mode 100644 +index 0000000..a7403bd +--- /dev/null ++++ b/aops-web/src/assets/vertical-left.svg +@@ -0,0 +1 @@ ++ +\ No newline at end of file +diff --git a/aops-web/src/components/TimeScopeSelector/index.vue b/aops-web/src/components/TimeScopeSelector/index.vue +new file mode 100644 +index 0000000..5445735 +--- /dev/null ++++ b/aops-web/src/components/TimeScopeSelector/index.vue +@@ -0,0 +1,59 @@ ++ ++ ++ ++ ++ +diff --git a/aops-web/src/components/Uploader/index.vue b/aops-web/src/components/Uploader/index.vue +index 8a135c3..c202e09 100644 +--- a/aops-web/src/components/Uploader/index.vue ++++ b/aops-web/src/components/Uploader/index.vue +@@ -62,17 +62,23 @@ export default 
{ + const reader = new FileReader() + reader.readAsText(file) + reader.onload = function (e) { +- let content = e.target.result +- if (_this.toJSON) { +- if (TYPE_ENUM[_this.fileType] = 'application/x-yaml') { +- content = yaml.load(content) +- } else { +- content = JSON.parse(content) ++ try { ++ let content = e.target.result ++ if (_this.toJSON) { ++ if (TYPE_ENUM[_this.fileType] === 'application/x-yaml') { ++ content = yaml.load(content) ++ } else { ++ content = JSON.parse(content) ++ } + } ++ _this.$emit('load', content) ++ _this.$emit('change', content) ++ resolve(content) ++ } catch (err_async) { ++ _this.$emit('error', err_async) ++ _this.$emit('change') ++ reject(err_async) + } +- _this.$emit('load', content) +- _this.$emit('change', content) +- resolve(content) + } + } catch (err) { + _this.$emit('error', err) +diff --git a/aops-web/src/config/router.config.js b/aops-web/src/config/router.config.js +index 0a88a0b..1b5a8bf 100644 +--- a/aops-web/src/config/router.config.js ++++ b/aops-web/src/config/router.config.js +@@ -177,6 +177,21 @@ export const asyncRouterMap = [ + } + ] + }, ++ { ++ path: routeMap.task.path, ++ name: 'task', ++ redirect: routeMap.task.children.TaskManagement.path, ++ component: RouteView, ++ meta: { title: routeMap.task.title, icon: 'robot', permission: ['task'] }, ++ children: [ ++ { ++ path: routeMap.task.children.TaskManagement.path, ++ name: 'TaskManagement', ++ component: () => import('@/views/task/TaskManagement'), ++ meta: { title: routeMap.task.children.TaskManagement.title, permission: ['task'] } ++ } ++ ] ++ }, + { + path: routeMap.diagnosis.path, + name: 'diagnosis', +@@ -290,21 +305,6 @@ export const asyncRouterMap = [ + } + ] + }, +- { +- path: routeMap.task.path, +- name: 'task', +- redirect: routeMap.task.children.TaskManagement.path, +- component: RouteView, +- meta: { title: routeMap.task.title, icon: 'robot', permission: ['task'] }, +- children: [ +- { +- path: routeMap.task.children.TaskManagement.path, +- name: 'TaskManagement', +- component: () => import('@/views/task/TaskManagement'), +- meta: { title: routeMap.task.children.TaskManagement.title, permission: ['task'] } +- } +- ] +- }, + { + path: routeMap.networkTopo.path, + name: 'networkTopo', +diff --git a/aops-web/src/views/assests/HostEdition.vue b/aops-web/src/views/assests/HostEdition.vue +index 37dcef7..d7990a6 100644 +--- a/aops-web/src/views/assests/HostEdition.vue ++++ b/aops-web/src/views/assests/HostEdition.vue +@@ -199,15 +199,9 @@ export default { + router.go(-1) + }, + checkNameInput (rule, value, cb) { +- if (/[^0-9a-z_]/.test(value)) { ++ if (/[^0-9a-z_.]/.test(value)) { + /* eslint-disable */ +- cb('只能输入数字、小写字母和英文下划线') +- /* eslint-enable */ +- return +- } +- if (/^[^a-z]/.test(value)) { +- /* eslint-disable */ +- cb('首字母应为小写字母') ++ cb('只能输入数字、小写字母和英文.和_') + /* eslint-enable */ + return + } +@@ -247,7 +241,7 @@ export default { + /* eslint-enable */ + return + } +- if (value.length < 8 || value.length > 20) { ++ if (value && (value.length < 8 || value.length > 20)) { + /* eslint-disable */ + cb('长度应为8-20字符') + /* eslint-enable */ +diff --git a/aops-web/src/views/assests/HostManagement.vue b/aops-web/src/views/assests/HostManagement.vue +index 8462884..78e371b 100644 +--- a/aops-web/src/views/assests/HostManagement.vue ++++ b/aops-web/src/views/assests/HostManagement.vue +@@ -126,7 +126,7 @@ export default { + { + dataIndex: 'public_ip', + key: 'public_ip', +- title: '公网IP地址' ++ title: 'IP地址' + }, + { + dataIndex: 'ssh_port', +diff --git 
a/aops-web/src/views/assests/components/AddHostGroupModal.vue b/aops-web/src/views/assests/components/AddHostGroupModal.vue +index 3474fa4..daea694 100644 +--- a/aops-web/src/views/assests/components/AddHostGroupModal.vue ++++ b/aops-web/src/views/assests/components/AddHostGroupModal.vue +@@ -109,7 +109,7 @@ export default { + cb() + }, + checkHostGroupdesc (rule, value, cb) { +- if (value.length > 256) { ++ if (value && value.length > 256) { + /* eslint-disable */ + cb('长度不超过256个字符') + /* eslint-enable */ +diff --git a/aops-web/src/views/configuration/TranscationDomainManagement.vue b/aops-web/src/views/configuration/TranscationDomainManagement.vue +index 5124a56..8fc26b0 100644 +--- a/aops-web/src/views/configuration/TranscationDomainManagement.vue ++++ b/aops-web/src/views/configuration/TranscationDomainManagement.vue +@@ -98,6 +98,7 @@ export default { + // 特殊处理 + _this.domainData = res || [] + }).catch(function (err) { ++ if (err.response.data.code === 400) return + _this.$message.error(err.response.data.msg) + }).finally(function () { _this.domainLoading = false }) + }, +diff --git a/aops-web/src/views/dashboard/Dashboard.vue b/aops-web/src/views/dashboard/Dashboard.vue +index fc557e1..12a6e26 100644 +--- a/aops-web/src/views/dashboard/Dashboard.vue ++++ b/aops-web/src/views/dashboard/Dashboard.vue +@@ -82,7 +82,6 @@ + +

    异常检测记录
    + { ++ return { ++ ...result, ++ key: `${result.host_id}+${result.check_item}+${result.start}+${result.end}` ++ } ++ }) : [] + }).catch(function (err) { + that.$message.error(err.response.data.msg) + }) +@@ -205,9 +209,9 @@ const columns = [ + title: '检测条件' + }, + { +- dataIndex: 'value', +- key: 'value', +- title: '检测结果' ++ dataIndex: 'description', ++ key: 'description', ++ title: '描述' + }, + { + title: '检测时间段', +diff --git a/aops-web/src/views/diagnosis/AbnormalCheck.vue b/aops-web/src/views/diagnosis/AbnormalCheck.vue +index eab2eb4..2680099 100644 +--- a/aops-web/src/views/diagnosis/AbnormalCheck.vue ++++ b/aops-web/src/views/diagnosis/AbnormalCheck.vue +@@ -10,7 +10,10 @@ +
    +
    +
    异常检测规则数量
    +-
    {{ ruleCount.toString().replace(/(\d)(?=(?:\d{3})+$)/g, '$1,') }}
    ++
    ++ ++ {{ ruleCount.toString().replace(/(\d)(?=(?:\d{3})+$)/g, '$1,') }} ++
    +
    +
    +
+
+@@ -22,7 +25,7 @@ +
    + + + + +@@ -85,6 +88,40 @@ + + +
    异常检测记录
    ++ ++ ++ ++ ++ ++ ++ ++ 按时间筛选 ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + {{ index + firstIndex }} + +- +- 查看报告 +- +- 删除 +- +- +- +
    + +
    +@@ -122,10 +146,15 @@ import MyPageHeaderWrapper from '@/views/utils/MyPageHeaderWrapper' + import DrawerView from '@/views/utils/DrawerView' + import GetCheckResultDrawer from '@/views/diagnosis/components/GetCheckResultDrawer' + import AddAbnormalCheckRuleDrawer from '@/views/diagnosis/components/AddAbnormalCheckRuleDrawer' +-import { getRuleCount, getResultCountTopTen, getResult } from '@/api/check' ++import AddFaultDiagnosis from '@/views/diagnosis/components/AddFaultDiagnosis' ++import { getRuleAll, getRuleCount, getResultCountTopTen, getResult } from '@/api/check' ++import { getDiagTree } from '@/api/diagnosis' ++import { hostList } from '@/api/assest' + import { dateFormat } from '@/views/utils/Utils' + import CheckResultExpanded from '@/views/diagnosis/components/CheckResultExpanded' + ++const defaultPagination = { current: 1, pageSize: 10, showSizeChanger: true, showQuickJumper: true } ++ + export default { + name: 'AbnormalCheck', + components: { +@@ -133,14 +162,76 @@ import CheckResultExpanded from '@/views/diagnosis/components/CheckResultExpande + DrawerView, + AddAbnormalCheckRuleDrawer, + GetCheckResultDrawer, +- CheckResultExpanded ++ CheckResultExpanded, ++ AddFaultDiagnosis + }, + mounted: function () { + this.getRuleCount() + this.getResultCountTopTen() + this.getResultList({}) ++ // 获取筛选数据列表 ++ this.getFilterListData() ++ // 获取故障树列表 ++ this.getDiagTreeList() + }, + computed: { ++ columns () { ++ let { filters } = this ++ filters = filters || {} ++ return [ ++ { ++ title: '序号', ++ dataIndex: 'index', ++ key: 'index', ++ align: 'center', ++ width: 70, ++ scopedSlots: { customRender: 'index' } ++ }, ++ { ++ dataIndex: 'hostName', ++ key: 'hostName', ++ title: '主机名称', ++ filteredValue: filters.hostName || null, ++ filters: this.hostAllList.map(host => { ++ return { ++ text: host.host_name, ++ value: host.host_id ++ } ++ }) ++ }, ++ { ++ dataIndex: 'ip', ++ key: 'ip', ++ title: 'IP地址' ++ }, ++ { ++ dataIndex: 'check_item', ++ key: 'check_item', ++ title: '检测项', ++ filteredValue: filters.check_item || null, ++ filters: this.ruleAllList.map(rule => { ++ return { ++ text: rule.check_item, ++ value: rule.check_item ++ } ++ }) ++ }, ++ { ++ dataIndex: 'condition', ++ key: 'condition', ++ title: '检测条件' ++ }, ++ { ++ dataIndex: 'description', ++ key: 'description', ++ title: '描述' ++ }, ++ { ++ title: '检测时间段', ++ customRender: (text, record, index) => dateFormat('YYYY-mm-dd HH:MM:SS', record.start * 1000) + ' 至 ' + dateFormat('YYYY-mm-dd HH:MM:SS', record.end * 1000) ++ } ++ ] ++ }, + firstIndex () { + return (this.pagination.current - 1) * this.pagination.pageSize + 1 + } +@@ -148,22 +239,76 @@ import CheckResultExpanded from '@/views/diagnosis/components/CheckResultExpande + data () { + return { + ruleCount: 0, +- filters: null, +- sorter: null, ++ countIsLoading: false, + tableIsLoading: false, +- columns, + resultCountList: [], + resultList: [], +- pagination: { current: 1, pageSize: 5, showSizeChanger: true, showQuickJumper: true } ++ pagination: defaultPagination, ++ filters: null, ++ sorter: null, ++ ruleAllList: [], ++ hostAllList: [], ++ treeDataAll: [] + } + }, + methods: { ++ getDiagTreeList () { ++ const _this = this ++ getDiagTree({ ++ treeList: [] ++ }).then(function (res) { ++ _this.treeDataAll = [{}].concat(res.trees) ++ }).catch(function (err) { ++ _this.$message.error(err.response.data.msg) ++ }).finally(function () { ++ }) ++ }, ++ filterByTime () { ++ this.pagination = defaultPagination ++ this.getResultList() ++ }, ++ handleTimeSelect (value) { ++ if 
(!this.filters) { ++ this.filters = { ++ timeRange: value ++ } ++ } else { ++ this.filters.timeRange = value ++ } ++ }, ++ getFilterListData () { ++ const _this = this ++ getRuleAll().then(function (res) { ++ _this.ruleAllList = res.check_items.map(function (item) { ++ return { ++ check_item: item.check_item ++ } ++ }) ++ }) ++ hostList({ ++ tableInfo: { ++ pagination: {}, ++ filters: {}, ++ sorter: {} ++ } ++ }).then(function (res) { ++ _this.hostAllList = res.host_infos.map(function (host) { ++ return { ++ host_name: host.host_name, ++ host_id: host.host_id ++ } ++ }) ++ }) ++ }, + getRuleCount () { + var that = this ++ this.countIsLoading = true + getRuleCount().then(function (data) { + that.ruleCount = data.rule_count + }).catch(function (err) { + that.$message.error(err.response.data.msg) ++ }).finally(() => { ++ that.countIsLoading = false + }) + }, + getResultCountTopTen () { +@@ -177,13 +322,32 @@ import CheckResultExpanded from '@/views/diagnosis/components/CheckResultExpande + paginationChange (page, pageSize) { + // this.getResultList({}) + }, +- handleTableChange (pagination) { +- this.pagination = pagination // 存储翻页状态 ++ handleTableChange (pagination, filters, sorter) { ++ const timeFilter = this.filters && this.filters.timeRange ++ if (this.paginationChange.current === pagination.current) { ++ this.pagination = defaultPagination // 筛选是重置pagination ++ } else { ++ this.pagination = pagination // 存储翻页状态 ++ } ++ this.filters = { ++ ...filters, ++ timeRange: timeFilter ++ } ++ this.sorter = sorter + this.getResultList() // 出发排序、筛选、分页时,重新请求 + }, + getResultList () { + var that = this +- getResult({ perPage: this.pagination.pageSize, page: this.pagination.current }).then(function (data) { ++ const pagination = this.pagination || {} ++ const filters = this.filters || {} ++ this.tableIsLoading = true ++ getResult({ ++ perPage: pagination.pageSize, ++ page: pagination.current, ++ hostList: filters.hostName || [], ++ checkItems: filters.check_item || [], ++ timeRange: filters.timeRange && filters.timeRange.map(momentTime => momentTime ? that.getUnixTime(momentTime.format('YYYY-MM-DD HH:mm:ss')) : undefined) ++ }).then(function (data) { + that.resultList = data.check_result ? 
data.check_result.map(result => { + return { + ...result, +@@ -194,59 +358,26 @@ import CheckResultExpanded from '@/views/diagnosis/components/CheckResultExpande + that.pagination.total = data.total_count + }).catch(function (err) { + that.$message.error(err.response.data.msg) ++ }).finally(() => { ++ this.tableIsLoading = false + }) + }, + deleteResult (result) { + this.$message.success('记录删除成功') ++ }, ++ handleAddRuleSuccess () { ++ this.getRuleCount() ++ }, ++ getUnixTime (dateStr) { ++ const newStr = dateStr.replace(/-/g, '/') ++ const date = new Date(newStr) ++ return date.getTime() / 1000 ++ }, ++ addFaultDiagnosisSuccess () { + } + } + } + +- const columns = [ +- { +- title: '序号', +- dataIndex: 'index', +- key: 'index', +- align: 'center', +- width: 70, +- scopedSlots: { customRender: 'index' } +- }, +- { +- dataIndex: 'hostName', +- key: 'hostName', +- title: '主机名称' +- }, +- { +- dataIndex: 'ip', +- key: 'ip', +- title: 'IP地址' +- }, +- { +- dataIndex: 'check_item', +- key: 'check_item', +- title: '检测项' +- }, +- { +- dataIndex: 'condition', +- key: 'condition', +- title: '检测条件' +- }, +- { +- dataIndex: 'value', +- key: 'value', +- title: '检测结果' +- }, +- { +- title: '检测时间段', +- customRender: (text, record, index) => dateFormat('YYYY-mm-dd HH:MM:SS', record.start * 1000) + ' 至 ' + dateFormat('YYYY-mm-dd HH:MM:SS', record.end * 1000) +- }, +- { +- title: '操作', +- key: 'action', +- scopedSlots: { customRender: 'action' } +- } +- ] +- + + + +diff --git a/aops-web/src/views/diagnosis/DiagReport.vue b/aops-web/src/views/diagnosis/DiagReport.vue +index 16e429e..6e31f43 100644 +--- a/aops-web/src/views/diagnosis/DiagReport.vue ++++ b/aops-web/src/views/diagnosis/DiagReport.vue +@@ -2,28 +2,22 @@ + + +
    + +@@ -31,6 +25,7 @@ + + + +@@ -45,7 +40,8 @@ + + +diff --git a/aops-web/src/views/networkTopo/NetworkTopo.vue b/aops-web/src/views/networkTopo/NetworkTopo.vue +index 1fd2e66..683c99e 100644 +--- a/aops-web/src/views/networkTopo/NetworkTopo.vue ++++ b/aops-web/src/views/networkTopo/NetworkTopo.vue +@@ -104,7 +104,10 @@ export default { + _this.setGraphData(res.entities || []) + _this.initialGraph() + }).catch(err => { +- _this.$message.error(err.response.data.msg) ++ if (err.response.data && err.response.data.status === 500) { ++ _this.$message.error('服务器错误,请稍后再试') ++ } ++ _this.$message.error(err.response.data.msg || err.response.data.title || '获取架构数据失败,请稍后再试') + }).finally(() => { + _this.dataLoading = false + }) +diff --git a/aops-web/src/views/task/TaskManagement.vue b/aops-web/src/views/task/TaskManagement.vue +index 9356497..0d75e9d 100644 +--- a/aops-web/src/views/task/TaskManagement.vue ++++ b/aops-web/src/views/task/TaskManagement.vue +@@ -36,9 +36,7 @@ + :loading="tableIsLoading" + > + +- +- 执行 +- ++ 执行 + + + +@@ -68,7 +66,7 @@ + +
    +
    +- ++ +
    +
    +
    {{ item.template_name }}
    +@@ -126,7 +124,7 @@ + + const defaultPagination = { + current: 1, +- pageSize: 2, ++ pageSize: 10, + showSizeChanger: true, + showQuickJumper: true + } +@@ -214,7 +212,7 @@ + methods: { + // 新增playbook模板 + addTemplateSuccess () { +- this.getTemplateList() ++ this.refreshTemplateList() + }, + // 新增部署任务 + addTaskSuccess () { +@@ -267,11 +265,15 @@ + }, + // 刷新列表数据 + handleRefresh () { ++ const _this = this + this.pagination = defaultPagination + this.sorter = null + this.filters = null + this.selectedRowKeys = [] +- this.getTaskList() ++ this.tableIsLoading = true ++ setTimeout(function () { ++ _this.getTaskList() ++ }, 1500) + }, + // 删除配置任务 + deleteTask (record) { +@@ -297,7 +299,21 @@ + }, + // 执行配置任务 + executeTask (record) { +- return this.handleExecuteTask([record.task_id]) ++ console.log(record) ++ const hostNameList = record.host_list && record.host_list.map(host => host.host_name) ++ const _this = this ++ this.$confirm({ ++ title: (

    { `确认执行任务:${record.task_name}?` }

    ), ++ content: (

    {hostNameList ? `该任务可能会修改以下主机配置:${hostNameList.join('、')}` : ''}

    ++

    详情请查看任务描述。

    ++
    ++ ), ++ icon: () => , ++ okType: 'danger', ++ okText: '执行', ++ onOk: function () { return _this.handleExecuteTask([record.task_id]) }, ++ onCancel () {} ++ }) + }, + // 执行部署任务 + handleExecuteTask (taskList, isBash) { +@@ -331,6 +347,13 @@ + _this.templateIsLoading = false + }) + }, ++ refreshTemplateList () { ++ const _this = this ++ this.templateIsLoading = true ++ setTimeout(function () { ++ _this.getTemplateList() ++ }, 1500) ++ }, + // 删除playbook模板 + deleteTemplate (templateName) { + const _this = this +@@ -340,7 +363,7 @@ + templateList + }).then(function (res) { + _this.$message.success('删除成功') +- _this.getTemplateList() ++ _this.refreshTemplateList() + }).catch(function (err) { + _this.$message.error(err.response.data.msg) + }).finally(function () { +diff --git a/aops-web/src/views/task/components/AddTask.vue b/aops-web/src/views/task/components/AddTask.vue +index 3a79576..c286db1 100644 +--- a/aops-web/src/views/task/components/AddTask.vue ++++ b/aops-web/src/views/task/components/AddTask.vue +@@ -3,7 +3,7 @@ + + + +- ++ + + + +@@ -131,7 +131,7 @@ import { getTemplateList, generateTask } from '@/api/task' + cb() + }, + checkTaskdesc (rule, value, cb) { +- if (value.length > 256) { ++ if (value && value.length > 256) { + /* eslint-disable */ + cb('长度不超过256个字符') + /* eslint-enable */ +diff --git a/aops-web/src/views/task/components/AddTemplate.vue b/aops-web/src/views/task/components/AddTemplate.vue +index 8e593d9..7e3baab 100644 +--- a/aops-web/src/views/task/components/AddTemplate.vue ++++ b/aops-web/src/views/task/components/AddTemplate.vue +@@ -3,7 +3,7 @@ + + + +- ++ + + + +@@ -14,7 +14,7 @@ + toJSON + uid="treeUploader" + fileType="yaml" +- v-decorator="['template_content',{rules: [{ required: true, message: '请上传YAML文件' }]}]" ++ v-decorator="['template_content',{rules: [{ required: true, message: '请上传YAML类型文件,并确保格式符合要求' }]}]" + /> + + +@@ -25,7 +25,7 @@ + + + +@@ -80,7 +80,7 @@ import Uploader from '@/components/Uploader' + }) + }, + checkTemplateName (rule, value, cb) { +- if (value.length > 64) { ++ if (value && value.length > 64) { + /* eslint-disable */ + cb('长度不超过64个字符') + /* eslint-enable */ +@@ -95,7 +95,7 @@ import Uploader from '@/components/Uploader' + cb() + }, + checkTemplatedesc (rule, value, cb) { +- if (value.length > 256) { ++ if (value && value.length > 256) { + /* eslint-disable */ + cb('长度不超过256个字符') + /* eslint-enable */ +diff --git a/aops-web/src/views/utils/DrawerView.vue b/aops-web/src/views/utils/DrawerView.vue +index ec9f000..7e0e50f 100644 +--- a/aops-web/src/views/utils/DrawerView.vue ++++ b/aops-web/src/views/utils/DrawerView.vue +@@ -8,6 +8,7 @@ + :body-style="bodyStyle" + :visible="visible" + @close="visible = false" ++ destroyOnClose + > + +
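The aops-web hunks above apply the same defensive pattern in several form validators (AddHostGroupModal.vue, AddTask.vue, AddTemplate.vue): `value.length > N` becomes `value && value.length > N`, so an optional field left empty no longer throws on an undefined value. A minimal standalone sketch of that pattern follows; it is not part of the patch, and the helper name `maxLenValidator` is an assumption made only for this illustration.

```js
// Illustrative only — not part of the patch above.
// ant-design-vue custom validators receive (rule, value, callback); calling the
// callback with a message marks the field invalid, calling it with no arguments passes.
// `maxLenValidator` is a hypothetical helper name used for this sketch.
function maxLenValidator (max, message) {
  return function (rule, value, cb) {
    // Guard first: an untouched optional field may be undefined or null,
    // so `value.length` would throw without the `value &&` check seen in the hunks above.
    if (value && value.length > max) {
      cb(message)
      return
    }
    cb()
  }
}

// Roughly how checkTemplatedesc / checkTaskdesc behave after the fix:
const checkTemplatedesc = maxLenValidator(256, '长度不超过256个字符')
checkTemplatedesc({}, undefined, function () {}) // passes instead of throwing
```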
    +diff --git a/gala-gopher/config/gala-gopher.conf b/gala-gopher/config/gala-gopher.conf +index 9390b92..16832c5 100644 +--- a/gala-gopher/config/gala-gopher.conf ++++ b/gala-gopher/config/gala-gopher.conf +@@ -39,7 +39,7 @@ probes = + ( + { + name = "example"; +- switch = "on"; ++ switch = "off"; + interval = 1; + }, + { +@@ -49,7 +49,7 @@ probes = + }, + { + name = "system_vmstat"; +- switch = "on"; ++ switch = "off"; + interval = 2; + }, + { +@@ -70,7 +70,7 @@ extend_probes = + name = "redis"; + command = "python3 /opt/gala-gopher/extend_probes/redis_probe.py"; + param = ""; +- switch = "on"; ++ switch = "off"; + }, + { + name = "tcp"; +diff --git a/gala-gopher/doc/api_doc.md b/gala-gopher/doc/api_doc.md +new file mode 100644 +index 0000000..7ed7466 +--- /dev/null ++++ b/gala-gopher/doc/api_doc.md +@@ -0,0 +1,89 @@ ++# gala-gopher 数据访问API ++ ++gala-gopher通过三种方式提供收集的原始数据,数据包含实体(被观测对象)数据和实体上的指标数据。API尽量在小版本间保持接口稳定性。 ++ ++当前API版本是 `v0.1` ++ ++## http方式 ++ ++本方式仅提供指标数据输出。 ++ ++访问/metrics地址时返回如下内容: ++ ++``` ++# HELP tcp_link_rx_byte byte received of the tcp link. ++# TYPE tcp_link_rx_byte gauge ++tcp_link_rx_byte{pid="3426",client_ip="192.168.100.110",client_port="1235",server_ip="192.168.100.110",server_port="22"} 3812 ++``` ++ ++返回的数据,由三部分组成:注释(HELP),类型(TYPE)和数据。 ++ ++以# HELP开始的内容提供指标名和说明信息: ++ ++``` ++# HELP ++``` ++ ++以# TYPE开始的内容提供指标名和指标类型,TYPE注释行必须出现在指标的第一个样本之前。 ++ ++``` ++# TYPE ++``` ++ ++除了# 开头的所有行是样本数据,遵循以下格式规范: ++ ++``` ++metric_name [ ++ "{" label_name "=" `"` label_value `"` { "," label_name "=" `"` label_value `"` } [ "," ] "}" ++] value [ timestamp ] ++``` ++ ++metric_name和label_name必须遵循gala-gopher的观测对象和观测指标命名规范。value是一个float格式的数据,timestamp的类型为int64(从1970-01-01 00:00:00以来的毫秒数),timestamp默认为当前时间。具有相同metric_name的数据按照组的形式排列,每行由指标名称和标签键值对组合唯一确定。 ++ ++## kafka方式 ++ ++``` ++{"table_name": "tcp_link", "timestamp": 1301469816, "machine_id": "5002b12c68744d1a8e0309f7d00462a2", "pid": "35331", "process_name": "curl", "role": "1", "client_ip": "192.168.100.110", "client_port": "1235", "server_ip": "192.168.100.111", "server_port": "80", "protocol": "2", "rx_bytes": "1710139", "tx_bytes": "94", "packets_in": "324", "packets_out": "282", "retran_packets": "0", "lost_packets": "0", "rtt": "404", ...} ++``` ++ ++``` ++{"table_name": tablename, "timestamp": timestamp, "machine_id": machine_id, "key": value, ...} ++``` ++ ++## file方式 ++ ++本方式仅提供实体数据输出。 ++提供gala-gopher.output.meta和gala-gopher.output.data文件分别描述元数据和数据,文件路径可配置。 ++ ++文件gala-gopher.output.meta中内容: ++ ++``` ++tcp-link timestamp machine_id pid client_ip client_port server_ip server_port [rx_bytes tx_bytes packets_in packets_out ...] ++udp-link timestamp machine_id pid client_ip client_port server_ip server_port [rx_bytes tx_bytes packets_in packets_out ...] ++nginx-link timestamp machine_id pid client_ip client_port virtual_ip virtual_port server_ip server_port ... ++lvs-fullnat-link timestamp machine_id client_ip client_port virtual_ip virtual_port local_ip local_port server_ip server_port ... ++lvs-dr-link timestamp machine_id client_ip client_port virtual_ip virtual_port server_mac ... ++haproxy-link timestamp machine_id pid client_ip client_port virtual_ip virtual_port server_ip server_port ... ++kafka-link timestamp machine_id pid producer_ip producer_port topic kafka_ip kafka_port consumer_ip consumer_port ... ++rabbitmq-link timestamp machine_id pid producer_ip producer_port queue rabbit_ip rabbit_port consumer_ip consumer_port ... 
++etcd-link timestamp machine_id pid producer_ip producer_port url etcd_ip etcd_port watch_ip watch_port ... ++process timestamp machine_id pid cmd path ... ++container timestamp machine_id container_id [pids] ... ++virtual_machine timestamp machine_id hostname [container_ids] [pids] [ips] [macs] ... ++bare_matal timestamp machine_id hostname [vm_names] [container_ids] [pids] [ips] [macs] ... ++ ++``` ++每一行以实体类型开头,后面是该实体类型的属性以及指标,指标是可选的。 ++ ++文件gala-gopher.output.data中内容: ++tcp-link 1631351950 5002b12c68744d1a8e0309f7d00462a2 342695 192.168.100.110 1235 192.168.100.111 22 1710139 94 324 282 0 0 404 ++tcp-link 1631351950 5002b12c68744d1a8e0309f7d00462a2 342696 192.168.100.110 1238 192.168.100.111 80 1710139 94 324 282 0 0 404 ++``` ++ ++``` ++ ++## 命名规范 ++ ++### 观测对象命名规范 ++ ++### 观测指标命名规范 +diff --git a/gala-gopher/doc/design_coe.md b/gala-gopher/doc/design_coe.md +index 7584751..b53aa30 100644 +--- a/gala-gopher/doc/design_coe.md ++++ b/gala-gopher/doc/design_coe.md +@@ -480,18 +480,19 @@ gala-gopher -- dr-xr-x---. + + 以上,可以将基本的拓扑绘制出来,并可标记数据流方向; + +- - nginx_statistic ++ - nginx_link + + nginx观测指标,指标项: + +- | 指标项 | 含义 | 备注 | +- | ----------- | ------------------------- | ---- | +- | client_ip | 客户端ip | | +- | virtual_ip | 服务ip(nginx对外服务ip) | | +- | server_ip | backend ip | | +- | server_port | backend port | | +- | is_l7 | 是否七层LB转发 | | +- | link_count | 链路数 | | ++ | 指标项 | 含义 | 备注 | ++ | ------------ | ------------------------- | ---- | ++ | client_ip | 客户端ip | | ++ | virtual_ip | 服务ip(nginx对外服务ip) | | ++ | server_ip | backend ip | | ++ | virtual_port | virtual port | | ++ | server_port | backend port | | ++ | is_l7 | 是否七层LB转发 | | ++ | link_count | 链路数 | | + + - 拓扑绘制思路 + +@@ -618,7 +619,7 @@ gala-gopher -- dr-xr-x---. + + [table_info] // 配置关注的指标数据 + base_table_name = ["tcp_link", "lvs_link"] +- other_table_name = ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] ++ other_table_name = ["nginx_link" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] + + [option] + exclude_addr = "192.168.150" // 指定排除哪些地址 +diff --git a/gala-gopher/src/probes/extends/ebpf.probe/src/include/linux_4.19.90-2012.4.0.0053.oe1.h b/gala-gopher/src/probes/extends/ebpf.probe/src/include/linux_4.19.90-2012.4.0.0053.oe1.h +new file mode 100644 +index 0000000..e5826f6 +--- /dev/null ++++ b/gala-gopher/src/probes/extends/ebpf.probe/src/include/linux_4.19.90-2012.4.0.0053.oe1.h +@@ -0,0 +1,97760 @@ ++#ifndef __VMLINUX_H__ ++#define __VMLINUX_H__ ++ ++ ++enum module_state { ++ MODULE_STATE_LIVE = 0, ++ MODULE_STATE_COMING = 1, ++ MODULE_STATE_GOING = 2, ++ MODULE_STATE_UNFORMED = 3, ++}; ++ ++struct list_head { ++ struct list_head *next; ++ struct list_head *prev; ++}; ++ ++typedef struct { ++ int counter; ++} atomic_t; ++ ++struct refcount_struct { ++ atomic_t refs; ++}; ++ ++typedef struct refcount_struct refcount_t; ++ ++struct kref { ++ refcount_t refcount; ++}; ++ ++struct kset; ++ ++struct kobj_type; ++ ++struct kernfs_node; ++ ++struct kobject { ++ const char *name; ++ struct list_head entry; ++ struct kobject *parent; ++ struct kset *kset; ++ struct kobj_type *ktype; ++ struct kernfs_node *sd; ++ struct kref kref; ++ unsigned int state_initialized: 1; ++ unsigned int state_in_sysfs: 1; ++ unsigned int state_add_uevent_sent: 1; ++ unsigned int state_remove_uevent_sent: 1; ++ unsigned int uevent_suppress: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct module; ++ ++struct 
module_param_attrs; ++ ++struct completion; ++ ++struct module_kobject { ++ struct kobject kobj; ++ struct module *mod; ++ struct kobject *drivers_dir; ++ struct module_param_attrs *mp; ++ struct completion *kobj_completion; ++}; ++ ++typedef int __s32; ++ ++typedef __s32 s32; ++ ++typedef struct { ++ long int counter; ++} atomic64_t; ++ ++typedef atomic64_t atomic_long_t; ++ ++typedef unsigned char __u8; ++ ++typedef __u8 u8; ++ ++typedef short unsigned int __u16; ++ ++typedef __u16 u16; ++ ++struct qspinlock { ++ union { ++ atomic_t val; ++ struct { ++ u8 locked; ++ u8 pending; ++ }; ++ struct { ++ u16 locked_pending; ++ u16 tail; ++ }; ++ }; ++}; ++ ++typedef struct qspinlock arch_spinlock_t; ++ ++struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++}; ++ ++struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ }; ++}; ++ ++typedef struct spinlock spinlock_t; ++ ++struct optimistic_spin_queue { ++ atomic_t tail; ++}; ++ ++struct mutex { ++ atomic_long_t owner; ++ spinlock_t wait_lock; ++ struct optimistic_spin_queue osq; ++ struct list_head wait_list; ++}; ++ ++typedef _Bool bool; ++ ++struct rb_node { ++ long unsigned int __rb_parent_color; ++ struct rb_node *rb_right; ++ struct rb_node *rb_left; ++}; ++ ++struct latch_tree_node { ++ struct rb_node node[2]; ++}; ++ ++struct mod_tree_node { ++ struct module *mod; ++ struct latch_tree_node node; ++}; ++ ++struct module_layout { ++ void *base; ++ unsigned int size; ++ unsigned int text_size; ++ unsigned int ro_size; ++ unsigned int ro_after_init_size; ++ struct mod_tree_node mtn; ++}; ++ ++struct elf64_shdr; ++ ++struct mod_plt_sec { ++ struct elf64_shdr *plt; ++ int plt_num_entries; ++ int plt_max_entries; ++}; ++ ++struct plt_entry; ++ ++struct mod_arch_specific { ++ struct mod_plt_sec core; ++ struct mod_plt_sec init; ++ struct plt_entry *ftrace_trampoline; ++ struct plt_entry *core_plts; ++ bool have_plts; ++}; ++ ++struct elf64_sym; ++ ++typedef struct elf64_sym Elf64_Sym; ++ ++struct mod_kallsyms { ++ Elf64_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ ++typedef const int tracepoint_ptr_t; ++ ++enum MODULE_KLP_REL_STATE { ++ MODULE_KLP_REL_NONE = 0, ++ MODULE_KLP_REL_UNDO = 1, ++ MODULE_KLP_REL_DONE = 2, ++}; ++ ++struct module_attribute; ++ ++struct kernel_symbol; ++ ++struct kernel_param; ++ ++struct exception_table_entry; ++ ++struct bug_entry; ++ ++struct module_sect_attrs; ++ ++struct module_notes_attrs; ++ ++struct bpf_raw_event_map; ++ ++struct jump_entry; ++ ++struct trace_event_call; ++ ++struct trace_eval_map; ++ ++struct klp_modinfo; ++ ++struct module { ++ enum module_state state; ++ struct list_head list; ++ char name[56]; ++ struct module_kobject mkobj; ++ struct module_attribute *modinfo_attrs; ++ const char *version; ++ const char *srcversion; ++ struct kobject *holders_dir; ++ const struct kernel_symbol *syms; ++ const s32 *crcs; ++ unsigned int num_syms; ++ struct mutex param_lock; ++ struct kernel_param *kp; ++ unsigned int num_kp; ++ unsigned int num_gpl_syms; ++ const struct kernel_symbol *gpl_syms; ++ const s32 *gpl_crcs; ++ bool sig_ok; ++ bool async_probe_requested; ++ const struct kernel_symbol *gpl_future_syms; ++ const s32 *gpl_future_crcs; ++ unsigned int num_gpl_future_syms; ++ unsigned int num_exentries; ++ struct exception_table_entry *extable; ++ int (*init)(); ++ long: 64; ++ struct module_layout core_layout; ++ struct module_layout init_layout; ++ struct mod_arch_specific arch; ++ long unsigned int taints; ++ unsigned int num_bugs; ++ struct list_head bug_list; ++ struct 
bug_entry *bug_table; ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; ++ struct module_sect_attrs *sect_attrs; ++ struct module_notes_attrs *notes_attrs; ++ char *args; ++ void *percpu; ++ unsigned int percpu_size; ++ unsigned int num_tracepoints; ++ tracepoint_ptr_t *tracepoints_ptrs; ++ unsigned int num_bpf_raw_events; ++ struct bpf_raw_event_map *bpf_raw_events; ++ struct jump_entry *jump_entries; ++ unsigned int num_jump_entries; ++ unsigned int num_trace_bprintk_fmt; ++ const char **trace_bprintk_fmt_start; ++ struct trace_event_call **trace_events; ++ unsigned int num_trace_events; ++ struct trace_eval_map **trace_evals; ++ unsigned int num_trace_evals; ++ unsigned int num_ftrace_callsites; ++ long unsigned int *ftrace_callsites; ++ bool klp; ++ bool klp_alive; ++ struct klp_modinfo *klp_info; ++ struct list_head source_list; ++ struct list_head target_list; ++ void (*exit)(); ++ atomic_t refcnt; ++ union { ++ enum MODULE_KLP_REL_STATE klp_rel_state; ++ long int klp_rel_state_KABI; ++ }; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++typedef signed char __s8; ++ ++typedef short int __s16; ++ ++typedef unsigned int __u32; ++ ++typedef long long int __s64; ++ ++typedef long long unsigned int __u64; ++ ++typedef __s8 s8; ++ ++typedef __s16 s16; ++ ++typedef __u32 u32; ++ ++typedef __s64 s64; ++ ++typedef __u64 u64; ++ ++enum { ++ false = 0, ++ true = 1, ++}; ++ ++typedef long int __kernel_long_t; ++ ++typedef long unsigned int __kernel_ulong_t; ++ ++typedef int __kernel_pid_t; ++ ++typedef unsigned int __kernel_uid32_t; ++ ++typedef unsigned int __kernel_gid32_t; ++ ++typedef __kernel_ulong_t __kernel_size_t; ++ ++typedef __kernel_long_t __kernel_ssize_t; ++ ++typedef long long int __kernel_loff_t; ++ ++typedef __kernel_long_t __kernel_time_t; ++ ++typedef __kernel_long_t __kernel_clock_t; ++ ++typedef int __kernel_timer_t; ++ ++typedef int __kernel_clockid_t; ++ ++typedef __u16 __le16; ++ ++typedef __u16 __be16; ++ ++typedef __u32 __le32; ++ ++typedef __u32 __be32; ++ ++typedef __u64 __be64; ++ ++typedef __u16 __sum16; ++ ++typedef __u32 __wsum; ++ ++typedef unsigned int __poll_t; ++ ++struct kernel_symbol { ++ int value_offset; ++ int name_offset; ++}; ++ ++typedef u32 __kernel_dev_t; ++ ++typedef __kernel_dev_t dev_t; ++ ++typedef short unsigned int umode_t; ++ ++typedef __kernel_pid_t pid_t; ++ ++typedef __kernel_clockid_t clockid_t; ++ ++typedef __kernel_uid32_t uid_t; ++ ++typedef __kernel_gid32_t gid_t; ++ ++typedef long unsigned int uintptr_t; ++ ++typedef __kernel_loff_t loff_t; ++ ++typedef __kernel_size_t size_t; ++ ++typedef __kernel_ssize_t ssize_t; ++ ++typedef u8 u_int8_t; ++ ++typedef u16 u_int16_t; ++ ++typedef u32 u_int32_t; ++ ++typedef s32 int32_t; ++ ++typedef u32 uint32_t; ++ ++typedef u64 u_int64_t; ++ ++typedef long unsigned int sector_t; ++ ++typedef long unsigned int blkcnt_t; ++ ++typedef u64 dma_addr_t; ++ ++typedef unsigned int gfp_t; ++ ++typedef unsigned int slab_flags_t; ++ ++typedef unsigned int fmode_t; ++ ++typedef u64 phys_addr_t; ++ ++typedef phys_addr_t resource_size_t; ++ ++struct hlist_node; ++ ++struct hlist_head { ++ struct hlist_node *first; ++}; ++ ++struct hlist_node { ++ struct hlist_node *next; ++ struct hlist_node **pprev; ++}; ++ ++struct callback_head { ++ struct callback_head *next; ++ void (*func)(struct callback_head *); ++}; ++ ++typedef bool pstate_check_t(long unsigned int); ++ ++typedef int initcall_entry_t; ++ ++typedef u64 
jump_label_t; ++ ++struct jump_entry { ++ jump_label_t code; ++ jump_label_t target; ++ jump_label_t key; ++}; ++ ++struct static_key_mod; ++ ++struct static_key { ++ atomic_t enabled; ++ union { ++ long unsigned int type; ++ struct jump_entry *entries; ++ struct static_key_mod *next; ++ }; ++}; ++ ++struct static_key_true { ++ struct static_key key; ++}; ++ ++struct static_key_false { ++ struct static_key key; ++}; ++ ++typedef void *fl_owner_t; ++ ++struct file; ++ ++struct kiocb; ++ ++struct iov_iter; ++ ++struct dir_context; ++ ++struct poll_table_struct; ++ ++struct vm_area_struct; ++ ++struct inode; ++ ++struct file_lock; ++ ++struct page; ++ ++struct pipe_inode_info; ++ ++struct seq_file; ++ ++struct file_operations { ++ struct module *owner; ++ loff_t (*llseek)(struct file *, loff_t, int); ++ ssize_t (*read)(struct file *, char *, size_t, loff_t *); ++ ssize_t (*write)(struct file *, const char *, size_t, loff_t *); ++ ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ++ ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); ++ int (*iterate)(struct file *, struct dir_context *); ++ int (*iterate_shared)(struct file *, struct dir_context *); ++ __poll_t (*poll)(struct file *, struct poll_table_struct *); ++ long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); ++ long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); ++ int (*mmap)(struct file *, struct vm_area_struct *); ++ long unsigned int mmap_supported_flags; ++ int (*open)(struct inode *, struct file *); ++ int (*flush)(struct file *, fl_owner_t); ++ int (*release)(struct inode *, struct file *); ++ int (*fsync)(struct file *, loff_t, loff_t, int); ++ int (*fasync)(int, struct file *, int); ++ int (*lock)(struct file *, int, struct file_lock *); ++ ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int); ++ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ int (*check_flags)(int); ++ int (*flock)(struct file *, int, struct file_lock *); ++ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ++ ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); ++ int (*setlease)(struct file *, long int, struct file_lock **, void **); ++ long int (*fallocate)(struct file *, int, loff_t, loff_t); ++ void (*show_fdinfo)(struct seq_file *, struct file *); ++ ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); ++ int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64); ++ int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); ++ int (*fadvise)(struct file *, loff_t, loff_t, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct notifier_block; ++ ++struct atomic_notifier_head { ++ spinlock_t lock; ++ struct notifier_block *head; ++}; ++ ++enum system_states { ++ SYSTEM_BOOTING = 0, ++ SYSTEM_SCHEDULING = 1, ++ SYSTEM_RUNNING = 2, ++ SYSTEM_HALT = 3, ++ SYSTEM_POWER_OFF = 4, ++ SYSTEM_RESTART = 5, ++ SYSTEM_SUSPEND = 6, ++}; ++ ++struct taint_flag { ++ char c_true; ++ char c_false; ++ bool module; ++}; ++ ++struct bug_entry { ++ int bug_addr_disp; ++ int file_disp; ++ short unsigned int line; ++ short unsigned int flags; ++}; ++ ++struct cpumask { ++ long unsigned int bits[16]; ++}; ++ ++typedef struct 
cpumask cpumask_t; ++ ++typedef struct cpumask cpumask_var_t[1]; ++ ++typedef __s64 time64_t; ++ ++struct timespec { ++ __kernel_time_t tv_sec; ++ long int tv_nsec; ++}; ++ ++struct timezone { ++ int tz_minuteswest; ++ int tz_dsttime; ++}; ++ ++struct timespec64 { ++ time64_t tv_sec; ++ long int tv_nsec; ++}; ++ ++enum timespec_type { ++ TT_NONE = 0, ++ TT_NATIVE = 1, ++ TT_COMPAT = 2, ++}; ++ ++typedef s32 compat_time_t; ++ ++struct compat_timespec { ++ compat_time_t tv_sec; ++ s32 tv_nsec; ++}; ++ ++struct pollfd { ++ int fd; ++ short int events; ++ short int revents; ++}; ++ ++struct restart_block { ++ long int (*fn)(struct restart_block *); ++ union { ++ struct { ++ u32 *uaddr; ++ u32 val; ++ u32 flags; ++ u32 bitset; ++ u64 time; ++ u32 *uaddr2; ++ } futex; ++ struct { ++ clockid_t clockid; ++ enum timespec_type type; ++ union { ++ struct timespec *rmtp; ++ struct compat_timespec *compat_rmtp; ++ }; ++ u64 expires; ++ } nanosleep; ++ struct { ++ struct pollfd *ufds; ++ int nfds; ++ int has_timeout; ++ long unsigned int tv_sec; ++ long unsigned int tv_nsec; ++ } poll; ++ }; ++}; ++ ++typedef long unsigned int mm_segment_t; ++ ++struct thread_info { ++ long unsigned int flags; ++ mm_segment_t addr_limit; ++ int preempt_count; ++}; ++ ++struct llist_node { ++ struct llist_node *next; ++}; ++ ++struct load_weight { ++ long unsigned int weight; ++ u32 inv_weight; ++}; ++ ++struct sched_statistics { ++ u64 wait_start; ++ u64 wait_max; ++ u64 wait_count; ++ u64 wait_sum; ++ u64 iowait_count; ++ u64 iowait_sum; ++ u64 sleep_start; ++ u64 sleep_max; ++ s64 sum_sleep_runtime; ++ u64 block_start; ++ u64 block_max; ++ u64 exec_max; ++ u64 slice_max; ++ u64 nr_migrations_cold; ++ u64 nr_failed_migrations_affine; ++ u64 nr_failed_migrations_running; ++ u64 nr_failed_migrations_hot; ++ u64 nr_forced_migrations; ++ u64 nr_wakeups; ++ u64 nr_wakeups_sync; ++ u64 nr_wakeups_migrate; ++ u64 nr_wakeups_local; ++ u64 nr_wakeups_remote; ++ u64 nr_wakeups_affine; ++ u64 nr_wakeups_affine_attempts; ++ u64 nr_wakeups_passive; ++ u64 nr_wakeups_idle; ++}; ++ ++struct util_est { ++ unsigned int enqueued; ++ unsigned int ewma; ++}; ++ ++struct sched_avg { ++ u64 last_update_time; ++ u64 load_sum; ++ u64 runnable_load_sum; ++ u32 util_sum; ++ u32 period_contrib; ++ long unsigned int load_avg; ++ long unsigned int runnable_load_avg; ++ long unsigned int util_avg; ++ struct util_est util_est; ++}; ++ ++struct cfs_rq; ++ ++struct sched_entity { ++ struct load_weight load; ++ long unsigned int runnable_weight; ++ struct rb_node run_node; ++ struct list_head group_node; ++ unsigned int on_rq; ++ u64 exec_start; ++ u64 sum_exec_runtime; ++ u64 vruntime; ++ u64 prev_sum_exec_runtime; ++ u64 nr_migrations; ++ struct sched_statistics statistics; ++ int depth; ++ struct sched_entity *parent; ++ struct cfs_rq *cfs_rq; ++ struct cfs_rq *my_q; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sched_avg avg; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rt_rq; ++ ++struct sched_rt_entity { ++ struct list_head run_list; ++ long unsigned int timeout; ++ long unsigned int watchdog_stamp; ++ unsigned int time_slice; ++ short unsigned int on_rq; ++ short unsigned int on_list; ++ struct sched_rt_entity *back; ++ struct sched_rt_entity *parent; ++ struct rt_rq *rt_rq; ++ struct rt_rq *my_q; ++}; ++ ++typedef s64 ktime_t; ++ ++struct timerqueue_node { ++ struct 
rb_node node; ++ ktime_t expires; ++}; ++ ++enum hrtimer_restart { ++ HRTIMER_NORESTART = 0, ++ HRTIMER_RESTART = 1, ++}; ++ ++struct hrtimer_clock_base; ++ ++struct hrtimer { ++ struct timerqueue_node node; ++ ktime_t _softexpires; ++ enum hrtimer_restart (*function)(struct hrtimer *); ++ struct hrtimer_clock_base *base; ++ u8 state; ++ u8 is_rel; ++ u8 is_soft; ++}; ++ ++struct sched_dl_entity { ++ struct rb_node rb_node; ++ u64 dl_runtime; ++ u64 dl_deadline; ++ u64 dl_period; ++ u64 dl_bw; ++ u64 dl_density; ++ s64 runtime; ++ u64 deadline; ++ unsigned int flags; ++ unsigned int dl_throttled: 1; ++ unsigned int dl_boosted: 1; ++ unsigned int dl_yielded: 1; ++ unsigned int dl_non_contending: 1; ++ unsigned int dl_overrun: 1; ++ struct hrtimer dl_timer; ++ struct hrtimer inactive_timer; ++}; ++ ++struct sched_info { ++ long unsigned int pcount; ++ long long unsigned int run_delay; ++ long long unsigned int last_arrival; ++ long long unsigned int last_queued; ++}; ++ ++struct plist_node { ++ int prio; ++ struct list_head prio_list; ++ struct list_head node_list; ++}; ++ ++struct vmacache { ++ u64 seqnum; ++ struct vm_area_struct *vmas[4]; ++}; ++ ++struct task_rss_stat { ++ int events; ++ int count[4]; ++}; ++ ++typedef struct raw_spinlock raw_spinlock_t; ++ ++struct prev_cputime { ++ u64 utime; ++ u64 stime; ++ raw_spinlock_t lock; ++}; ++ ++struct seqcount { ++ unsigned int sequence; ++}; ++ ++typedef struct seqcount seqcount_t; ++ ++enum vtime_state { ++ VTIME_INACTIVE = 0, ++ VTIME_USER = 1, ++ VTIME_SYS = 2, ++}; ++ ++struct vtime { ++ seqcount_t seqcount; ++ long long unsigned int starttime; ++ enum vtime_state state; ++ u64 utime; ++ u64 stime; ++ u64 gtime; ++}; ++ ++struct task_cputime { ++ u64 utime; ++ u64 stime; ++ long long unsigned int sum_exec_runtime; ++}; ++ ++struct sem_undo_list; ++ ++struct sysv_sem { ++ struct sem_undo_list *undo_list; ++}; ++ ++struct sysv_shm { ++ struct list_head shm_clist; ++}; ++ ++typedef struct { ++ long unsigned int sig[1]; ++} sigset_t; ++ ++struct sigpending { ++ struct list_head list; ++ sigset_t signal; ++}; ++ ++typedef struct { ++ uid_t val; ++} kuid_t; ++ ++struct seccomp_filter; ++ ++struct seccomp { ++ int mode; ++ struct seccomp_filter *filter; ++}; ++ ++struct wake_q_node { ++ struct wake_q_node *next; ++}; ++ ++struct rb_root { ++ struct rb_node *rb_node; ++}; ++ ++struct rb_root_cached { ++ struct rb_root rb_root; ++ struct rb_node *rb_leftmost; ++}; ++ ++struct task_io_accounting { ++ u64 rchar; ++ u64 wchar; ++ u64 syscr; ++ u64 syscw; ++ u64 read_bytes; ++ u64 write_bytes; ++ u64 cancelled_write_bytes; ++}; ++ ++typedef struct { ++ long unsigned int bits[1]; ++} nodemask_t; ++ ++struct tlbflush_unmap_batch {}; ++ ++struct page_frag { ++ struct page *page; ++ __u32 offset; ++ __u32 size; ++}; ++ ++struct cpu_context { ++ long unsigned int x19; ++ long unsigned int x20; ++ long unsigned int x21; ++ long unsigned int x22; ++ long unsigned int x23; ++ long unsigned int x24; ++ long unsigned int x25; ++ long unsigned int x26; ++ long unsigned int x27; ++ long unsigned int x28; ++ long unsigned int fp; ++ long unsigned int sp; ++ long unsigned int pc; ++}; ++ ++struct user_fpsimd_state { ++ __int128 unsigned vregs[32]; ++ __u32 fpsr; ++ __u32 fpcr; ++ __u32 __reserved[2]; ++}; ++ ++struct perf_event; ++ ++struct debug_info { ++ int suspended_step; ++ int bps_disabled; ++ int wps_disabled; ++ struct perf_event *hbp_break[16]; ++ struct perf_event *hbp_watch[16]; ++}; ++ ++struct thread_struct { ++ struct cpu_context cpu_context; ++ 
long: 64; ++ struct { ++ long unsigned int tp_value; ++ long unsigned int tp2_value; ++ struct user_fpsimd_state fpsimd_state; ++ } uw; ++ unsigned int fpsimd_cpu; ++ void *sve_state; ++ unsigned int sve_vl; ++ unsigned int sve_vl_onexec; ++ long unsigned int fault_address; ++ long unsigned int fault_code; ++ struct debug_info debug; ++ long: 64; ++}; ++ ++struct sched_class; ++ ++struct task_group; ++ ++struct mm_struct; ++ ++struct pid; ++ ++struct cred; ++ ++struct nameidata; ++ ++struct fs_struct; ++ ++struct files_struct; ++ ++struct nsproxy; ++ ++struct signal_struct; ++ ++struct sighand_struct; ++ ++struct audit_context; ++ ++struct rt_mutex_waiter; ++ ++struct bio_list; ++ ++struct blk_plug; ++ ++struct reclaim_state; ++ ++struct backing_dev_info; ++ ++struct io_context; ++ ++struct siginfo; ++ ++typedef struct siginfo siginfo_t; ++ ++struct css_set; ++ ++struct robust_list_head; ++ ++struct compat_robust_list_head; ++ ++struct futex_pi_state; ++ ++struct perf_event_context; ++ ++struct mempolicy; ++ ++struct numa_group; ++ ++struct rseq; ++ ++struct task_delay_info; ++ ++struct ftrace_ret_stack; ++ ++struct mem_cgroup; ++ ++struct request_queue; ++ ++struct uprobe_task; ++ ++struct vm_struct; ++ ++struct task_struct { ++ struct thread_info thread_info; ++ volatile long int state; ++ void *stack; ++ atomic_t usage; ++ unsigned int flags; ++ unsigned int ptrace; ++ struct llist_node wake_entry; ++ int on_cpu; ++ unsigned int cpu; ++ unsigned int wakee_flips; ++ long unsigned int wakee_flip_decay_ts; ++ struct task_struct *last_wakee; ++ int recent_used_cpu; ++ int wake_cpu; ++ int on_rq; ++ int prio; ++ int static_prio; ++ int normal_prio; ++ unsigned int rt_priority; ++ const struct sched_class *sched_class; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sched_entity se; ++ struct sched_rt_entity rt; ++ struct task_group *sched_task_group; ++ struct sched_dl_entity dl; ++ struct hlist_head preempt_notifiers; ++ unsigned int btrace_seq; ++ unsigned int policy; ++ int nr_cpus_allowed; ++ cpumask_t cpus_allowed; ++ struct sched_info sched_info; ++ struct list_head tasks; ++ struct plist_node pushable_tasks; ++ struct rb_node pushable_dl_tasks; ++ struct mm_struct *mm; ++ struct mm_struct *active_mm; ++ struct vmacache vmacache; ++ struct task_rss_stat rss_stat; ++ int exit_state; ++ int exit_code; ++ int exit_signal; ++ int pdeath_signal; ++ long unsigned int jobctl; ++ unsigned int personality; ++ unsigned int sched_reset_on_fork: 1; ++ unsigned int sched_contributes_to_load: 1; ++ unsigned int sched_migrated: 1; ++ unsigned int sched_remote_wakeup: 1; ++ int: 28; ++ unsigned int in_execve: 1; ++ unsigned int in_iowait: 1; ++ unsigned int in_user_fault: 1; ++ unsigned int memcg_kmem_skip_account: 1; ++ unsigned int no_cgroup_migration: 1; ++ unsigned int use_memdelay: 1; ++ long unsigned int atomic_flags; ++ struct restart_block restart_block; ++ pid_t pid; ++ pid_t tgid; ++ long unsigned int stack_canary; ++ struct task_struct *real_parent; ++ struct task_struct *parent; ++ struct list_head children; ++ struct list_head sibling; ++ struct task_struct *group_leader; ++ struct list_head ptraced; ++ struct list_head ptrace_entry; ++ struct pid *thread_pid; ++ struct hlist_node pid_links[4]; ++ struct list_head thread_group; ++ struct list_head thread_node; ++ struct completion *vfork_done; ++ int *set_child_tid; ++ int *clear_child_tid; ++ u64 utime; ++ u64 stime; ++ u64 gtime; ++ struct prev_cputime prev_cputime; ++ struct vtime 
vtime; ++ atomic_t tick_dep_mask; ++ long unsigned int nvcsw; ++ long unsigned int nivcsw; ++ u64 start_time; ++ u64 real_start_time; ++ long unsigned int min_flt; ++ long unsigned int maj_flt; ++ struct task_cputime cputime_expires; ++ struct list_head cpu_timers[3]; ++ const struct cred *ptracer_cred; ++ const struct cred *real_cred; ++ const struct cred *cred; ++ char comm[16]; ++ struct nameidata *nameidata; ++ struct sysv_sem sysvsem; ++ struct sysv_shm sysvshm; ++ long unsigned int last_switch_count; ++ long unsigned int last_switch_time; ++ struct fs_struct *fs; ++ struct files_struct *files; ++ struct nsproxy *nsproxy; ++ struct signal_struct *signal; ++ struct sighand_struct *sighand; ++ sigset_t blocked; ++ sigset_t real_blocked; ++ sigset_t saved_sigmask; ++ struct sigpending pending; ++ long unsigned int sas_ss_sp; ++ size_t sas_ss_size; ++ unsigned int sas_ss_flags; ++ struct callback_head *task_works; ++ struct audit_context *audit_context; ++ kuid_t loginuid; ++ unsigned int sessionid; ++ struct seccomp seccomp; ++ u32 parent_exec_id; ++ u32 self_exec_id; ++ spinlock_t alloc_lock; ++ raw_spinlock_t pi_lock; ++ struct wake_q_node wake_q; ++ struct rb_root_cached pi_waiters; ++ struct task_struct *pi_top_task; ++ struct rt_mutex_waiter *pi_blocked_on; ++ void *journal_info; ++ struct bio_list *bio_list; ++ struct blk_plug *plug; ++ struct reclaim_state *reclaim_state; ++ struct backing_dev_info *backing_dev_info; ++ struct io_context *io_context; ++ long unsigned int ptrace_message; ++ siginfo_t *last_siginfo; ++ struct task_io_accounting ioac; ++ u64 acct_rss_mem1; ++ u64 acct_vm_mem1; ++ u64 acct_timexpd; ++ nodemask_t mems_allowed; ++ seqcount_t mems_allowed_seq; ++ int cpuset_mem_spread_rotor; ++ int cpuset_slab_spread_rotor; ++ struct css_set *cgroups; ++ struct list_head cg_list; ++ u32 closid; ++ u32 rmid; ++ struct robust_list_head *robust_list; ++ struct compat_robust_list_head *compat_robust_list; ++ struct list_head pi_state_list; ++ struct futex_pi_state *pi_state_cache; ++ struct perf_event_context *perf_event_ctxp[2]; ++ struct mutex perf_event_mutex; ++ struct list_head perf_event_list; ++ struct mempolicy *mempolicy; ++ short int il_prev; ++ short int pref_node_fork; ++ int numa_scan_seq; ++ unsigned int numa_scan_period; ++ unsigned int numa_scan_period_max; ++ int numa_preferred_nid; ++ long unsigned int numa_migrate_retry; ++ u64 node_stamp; ++ u64 last_task_numa_placement; ++ u64 last_sum_exec_runtime; ++ struct callback_head numa_work; ++ struct numa_group *numa_group; ++ long unsigned int *numa_faults; ++ long unsigned int total_numa_faults; ++ long unsigned int numa_faults_locality[3]; ++ long unsigned int numa_pages_migrated; ++ struct rseq *rseq; ++ u32 rseq_len; ++ u32 rseq_sig; ++ long unsigned int rseq_event_mask; ++ struct tlbflush_unmap_batch tlb_ubc; ++ struct callback_head rcu; ++ struct pipe_inode_info *splice_pipe; ++ struct page_frag task_frag; ++ struct task_delay_info *delays; ++ int nr_dirtied; ++ int nr_dirtied_pause; ++ long unsigned int dirty_paused_when; ++ u64 timer_slack_ns; ++ u64 default_timer_slack_ns; ++ int curr_ret_stack; ++ int curr_ret_depth; ++ struct ftrace_ret_stack *ret_stack; ++ long long unsigned int ftrace_timestamp; ++ atomic_t trace_overrun; ++ atomic_t tracing_graph_pause; ++ long unsigned int trace; ++ long unsigned int trace_recursion; ++ struct mem_cgroup *memcg_in_oom; ++ gfp_t memcg_oom_gfp_mask; ++ int memcg_oom_order; ++ unsigned int memcg_nr_pages_over_high; ++ struct mem_cgroup *active_memcg; ++ struct 
request_queue *throttle_queue; ++ struct uprobe_task *utask; ++ unsigned int sequential_io; ++ unsigned int sequential_io_avg; ++ int pagefault_disabled; ++ struct task_struct *oom_reaper_list; ++ struct vm_struct *stack_vm_area; ++ atomic_t stack_refcount; ++ int patch_state; ++ void *security; ++ u64 parent_exec_id_u64; ++ u64 self_exec_id_u64; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ struct thread_struct thread; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct lock_class_key {}; ++ ++typedef void (*smp_call_func_t)(void *); ++ ++struct __call_single_data { ++ struct llist_node llist; ++ smp_call_func_t func; ++ void *info; ++ unsigned int flags; ++}; ++ ++typedef struct __call_single_data call_single_data_t; ++ ++struct secondary_data { ++ void *stack; ++ struct task_struct *task; ++ long int status; ++}; ++ ++enum pcpu_fc { ++ PCPU_FC_AUTO = 0, ++ PCPU_FC_EMBED = 1, ++ PCPU_FC_PAGE = 2, ++ PCPU_FC_NR = 3, ++}; ++ ++enum ctx_state { ++ CONTEXT_DISABLED = 4294967295, ++ CONTEXT_KERNEL = 0, ++ CONTEXT_USER = 1, ++ CONTEXT_GUEST = 2, ++}; ++ ++struct context_tracking { ++ bool active; ++ int recursion; ++ enum ctx_state state; ++}; ++ ++typedef struct { ++ unsigned int __softirq_pending; ++ unsigned int ipi_irqs[8]; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++} irq_cpustat_t; ++ ++struct nmi_ctx { ++ u64 hcr; ++}; ++ ++enum ftr_type { ++ FTR_EXACT = 0, ++ FTR_LOWER_SAFE = 1, ++ FTR_HIGHER_SAFE = 2, ++ FTR_HIGHER_OR_ZERO_SAFE = 3, ++}; ++ ++struct arm64_ftr_bits { ++ bool sign; ++ bool visible; ++ bool strict; ++ enum ftr_type type; ++ u8 shift; ++ u8 width; ++ s64 safe_val; ++}; ++ ++struct arm64_ftr_reg { ++ const char *name; ++ u64 strict_mask; ++ u64 user_mask; ++ u64 sys_val; ++ u64 user_val; ++ const struct arm64_ftr_bits *ftr_bits; ++}; ++ ++struct user_pt_regs { ++ __u64 regs[31]; ++ __u64 sp; ++ __u64 pc; ++ __u64 pstate; ++}; ++ ++#if defined(__TARGET_ARCH_arm64) ++struct pt_regs { ++ union { ++ struct user_pt_regs user_regs; ++ struct { ++ u64 regs[31]; ++ u64 sp; ++ u64 pc; ++ u64 pstate; ++ }; ++ }; ++ u64 orig_x0; ++ s32 syscallno; ++ u32 unused2; ++ u64 orig_addr_limit; ++ u64 pmr_save; ++ u64 stackframe[2]; ++}; ++#elif defined(__TARGET_ARCH_x86) ++struct pt_regs { ++ long unsigned int r15; ++ long unsigned int r14; ++ long unsigned int r13; ++ long unsigned int r12; ++ long unsigned int bp; ++ long unsigned int bx; ++ long unsigned int r11; ++ long unsigned int r10; ++ long unsigned int r9; ++ long unsigned int r8; ++ long unsigned int ax; ++ long unsigned int cx; ++ long unsigned int dx; ++ long unsigned int si; ++ long unsigned int di; ++ long unsigned int orig_ax; ++ long unsigned int ip; ++ long unsigned int cs; ++ long unsigned int flags; ++ long unsigned int sp; ++ long unsigned int ss; ++}; ++#else ++#endif ++ ++struct qrwlock { ++ union { ++ atomic_t cnts; ++ struct { ++ u8 wlocked; ++ u8 __lstate[3]; ++ }; ++ }; ++ arch_spinlock_t wait_lock; ++}; ++ ++typedef struct qrwlock arch_rwlock_t; ++ ++typedef struct { ++ arch_rwlock_t raw_lock; ++} rwlock_t; ++ ++typedef struct { ++ struct seqcount seqcount; ++ spinlock_t lock; ++} seqlock_t; ++ ++enum arch_timer_erratum_match_type { ++ ate_match_dt = 0, ++ ate_match_local_cap_id = 1, ++ ate_match_acpi_oem_info = 2, ++}; ++ ++struct clock_event_device; ++ ++struct 
arch_timer_erratum_workaround { ++ enum arch_timer_erratum_match_type match_type; ++ const void *id; ++ const char *desc; ++ u32 (*read_cntp_tval_el0)(); ++ u32 (*read_cntv_tval_el0)(); ++ u64 (*read_cntpct_el0)(); ++ u64 (*read_cntvct_el0)(); ++ int (*set_next_event_phys)(long unsigned int, struct clock_event_device *); ++ int (*set_next_event_virt)(long unsigned int, struct clock_event_device *); ++}; ++ ++struct timer_list { ++ struct hlist_node entry; ++ long unsigned int expires; ++ void (*function)(struct timer_list *); ++ u32 flags; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct timerqueue_head { ++ struct rb_root head; ++ struct timerqueue_node *next; ++}; ++ ++struct hrtimer_cpu_base; ++ ++struct hrtimer_clock_base { ++ struct hrtimer_cpu_base *cpu_base; ++ unsigned int index; ++ clockid_t clockid; ++ seqcount_t seq; ++ struct hrtimer *running; ++ struct timerqueue_head active; ++ ktime_t (*get_time)(); ++ ktime_t offset; ++}; ++ ++struct hrtimer_cpu_base { ++ raw_spinlock_t lock; ++ unsigned int cpu; ++ unsigned int active_bases; ++ unsigned int clock_was_set_seq; ++ unsigned int hres_active: 1; ++ unsigned int in_hrtirq: 1; ++ unsigned int hang_detected: 1; ++ unsigned int softirq_activated: 1; ++ unsigned int nr_events; ++ short unsigned int nr_retries; ++ short unsigned int nr_hangs; ++ unsigned int max_hang_time; ++ ktime_t expires_next; ++ struct hrtimer *next_timer; ++ ktime_t softirq_expires_next; ++ struct hrtimer *softirq_next_timer; ++ struct hrtimer_clock_base clock_base[8]; ++}; ++ ++struct tick_device; ++ ++struct work_struct; ++ ++typedef void (*work_func_t)(struct work_struct *); ++ ++struct work_struct { ++ atomic_long_t data; ++ struct list_head entry; ++ work_func_t func; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct workqueue_struct; ++ ++struct delayed_work { ++ struct work_struct work; ++ struct timer_list timer; ++ struct workqueue_struct *wq; ++ int cpu; ++ long unsigned int data; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct rcu_work { ++ struct work_struct work; ++ struct callback_head rcu; ++ struct workqueue_struct *wq; ++}; ++ ++struct iovec { ++ void *iov_base; ++ __kernel_size_t iov_len; ++}; ++ ++struct kvec { ++ void *iov_base; ++ size_t iov_len; ++}; ++ ++struct bio_vec { ++ struct page *bv_page; ++ unsigned int bv_len; ++ unsigned int bv_offset; ++}; ++ ++struct wait_queue_head { ++ spinlock_t lock; ++ struct list_head head; ++}; ++ ++typedef struct wait_queue_head wait_queue_head_t; ++ ++struct fasync_struct; ++ ++struct pipe_buffer; ++ ++struct user_struct; ++ ++struct pipe_inode_info { ++ struct mutex mutex; ++ wait_queue_head_t wait; ++ unsigned int nrbufs; ++ unsigned int curbuf; ++ unsigned int buffers; ++ unsigned int readers; ++ unsigned int writers; ++ unsigned int files; ++ unsigned int waiting_writers; ++ unsigned int r_counter; ++ unsigned int w_counter; ++ struct page *tmp_page; ++ struct fasync_struct *fasync_readers; ++ struct fasync_struct *fasync_writers; ++ struct pipe_buffer *bufs; ++ struct user_struct *user; ++}; ++ ++struct iov_iter { ++ int type; ++ size_t iov_offset; ++ size_t count; ++ union { ++ const struct iovec *iov; ++ const struct kvec *kvec; ++ const struct bio_vec 
*bvec; ++ struct pipe_inode_info *pipe; ++ }; ++ union { ++ long unsigned int nr_segs; ++ struct { ++ int idx; ++ int start_idx; ++ }; ++ }; ++}; ++ ++typedef short unsigned int __kernel_sa_family_t; ++ ++typedef __kernel_sa_family_t sa_family_t; ++ ++struct sockaddr { ++ sa_family_t sa_family; ++ char sa_data[14]; ++}; ++ ++struct msghdr { ++ void *msg_name; ++ int msg_namelen; ++ struct iov_iter msg_iter; ++ void *msg_control; ++ __kernel_size_t msg_controllen; ++ unsigned int msg_flags; ++ struct kiocb *msg_iocb; ++}; ++ ++struct kiocb { ++ struct file *ki_filp; ++ loff_t ki_pos; ++ void (*ki_complete)(struct kiocb *, long int, long int); ++ void *private; ++ int ki_flags; ++ u16 ki_hint; ++ u16 ki_ioprio; ++}; ++ ++enum { ++ IPPROTO_IP = 0, ++ IPPROTO_ICMP = 1, ++ IPPROTO_IGMP = 2, ++ IPPROTO_IPIP = 4, ++ IPPROTO_TCP = 6, ++ IPPROTO_EGP = 8, ++ IPPROTO_PUP = 12, ++ IPPROTO_UDP = 17, ++ IPPROTO_IDP = 22, ++ IPPROTO_TP = 29, ++ IPPROTO_DCCP = 33, ++ IPPROTO_IPV6 = 41, ++ IPPROTO_RSVP = 46, ++ IPPROTO_GRE = 47, ++ IPPROTO_ESP = 50, ++ IPPROTO_AH = 51, ++ IPPROTO_MTP = 92, ++ IPPROTO_BEETPH = 94, ++ IPPROTO_ENCAP = 98, ++ IPPROTO_PIM = 103, ++ IPPROTO_COMP = 108, ++ IPPROTO_SCTP = 132, ++ IPPROTO_UDPLITE = 136, ++ IPPROTO_MPLS = 137, ++ IPPROTO_RAW = 255, ++ IPPROTO_MAX = 256, ++}; ++ ++struct in_addr { ++ __be32 s_addr; ++}; ++ ++struct uid_gid_extent { ++ u32 first; ++ u32 lower_first; ++ u32 count; ++}; ++ ++struct uid_gid_map { ++ u32 nr_extents; ++ union { ++ struct uid_gid_extent extent[5]; ++ struct { ++ struct uid_gid_extent *forward; ++ struct uid_gid_extent *reverse; ++ }; ++ }; ++}; ++ ++typedef struct { ++ gid_t val; ++} kgid_t; ++ ++struct proc_ns_operations; ++ ++struct ns_common { ++ atomic_long_t stashed; ++ const struct proc_ns_operations *ops; ++ unsigned int inum; ++}; ++ ++struct rw_semaphore { ++ atomic_long_t count; ++ struct list_head wait_list; ++ raw_spinlock_t wait_lock; ++ struct optimistic_spin_queue osq; ++ struct task_struct *owner; ++}; ++ ++struct ctl_table; ++ ++struct ctl_table_root; ++ ++struct ctl_table_set; ++ ++struct ctl_dir; ++ ++struct ctl_node; ++ ++struct ctl_table_header { ++ union { ++ struct { ++ struct ctl_table *ctl_table; ++ int used; ++ int count; ++ int nreg; ++ }; ++ struct callback_head rcu; ++ }; ++ struct completion *unregistering; ++ struct ctl_table *ctl_table_arg; ++ struct ctl_table_root *root; ++ struct ctl_table_set *set; ++ struct ctl_dir *parent; ++ struct ctl_node *node; ++ struct hlist_head inodes; ++}; ++ ++struct ctl_dir { ++ struct ctl_table_header header; ++ struct rb_root root; ++}; ++ ++struct ctl_table_set { ++ int (*is_seen)(struct ctl_table_set *); ++ struct ctl_dir dir; ++}; ++ ++struct key; ++ ++struct ucounts; ++ ++struct user_namespace { ++ struct uid_gid_map uid_map; ++ struct uid_gid_map gid_map; ++ struct uid_gid_map projid_map; ++ atomic_t count; ++ struct user_namespace *parent; ++ int level; ++ kuid_t owner; ++ kgid_t group; ++ struct ns_common ns; ++ long unsigned int flags; ++ struct key *persistent_keyring_register; ++ struct rw_semaphore persistent_keyring_register_sem; ++ struct work_struct work; ++ struct ctl_table_set set; ++ struct ctl_table_header *sysctls; ++ struct ucounts *ucounts; ++ int ucount_max[9]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); ++ ++struct ctl_table_poll; ++ ++struct ctl_table { ++ const 
char *procname; ++ void *data; ++ int maxlen; ++ umode_t mode; ++ struct ctl_table *child; ++ proc_handler *proc_handler; ++ struct ctl_table_poll *poll; ++ void *extra1; ++ void *extra2; ++}; ++ ++struct ctl_table_poll { ++ atomic_t event; ++ wait_queue_head_t wait; ++}; ++ ++struct ctl_node { ++ struct rb_node node; ++ struct ctl_table_header *header; ++}; ++ ++struct completion { ++ unsigned int done; ++ wait_queue_head_t wait; ++}; ++ ++struct ctl_table_root { ++ struct ctl_table_set default_set; ++ struct ctl_table_set * (*lookup)(struct ctl_table_root *); ++ void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); ++ int (*permissions)(struct ctl_table_header *, struct ctl_table *); ++}; ++ ++struct in6_addr { ++ union { ++ __u8 u6_addr8[16]; ++ __be16 u6_addr16[8]; ++ __be32 u6_addr32[4]; ++ } in6_u; ++}; ++ ++typedef struct { ++ u64 key[2]; ++} siphash_key_t; ++ ++struct ethhdr { ++ unsigned char h_dest[6]; ++ unsigned char h_source[6]; ++ __be16 h_proto; ++}; ++ ++struct flow_dissector { ++ unsigned int used_keys; ++ short unsigned int offset[24]; ++}; ++ ++struct flowi_tunnel { ++ __be64 tun_id; ++}; ++ ++struct flowi_common { ++ int flowic_oif; ++ int flowic_iif; ++ __u32 flowic_mark; ++ __u8 flowic_tos; ++ __u8 flowic_scope; ++ __u8 flowic_proto; ++ __u8 flowic_flags; ++ __u32 flowic_secid; ++ struct flowi_tunnel flowic_tun_key; ++ kuid_t flowic_uid; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++union flowi_uli { ++ struct { ++ __be16 dport; ++ __be16 sport; ++ } ports; ++ struct { ++ __u8 type; ++ __u8 code; ++ } icmpt; ++ struct { ++ __le16 dport; ++ __le16 sport; ++ } dnports; ++ __be32 spi; ++ __be32 gre_key; ++ struct { ++ __u8 type; ++ } mht; ++}; ++ ++struct flowi4 { ++ struct flowi_common __fl_common; ++ __be32 saddr; ++ __be32 daddr; ++ union flowi_uli uli; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct flowi6 { ++ struct flowi_common __fl_common; ++ struct in6_addr daddr; ++ struct in6_addr saddr; ++ __be32 flowlabel; ++ union flowi_uli uli; ++ __u32 mp_hash; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct flowidn { ++ struct flowi_common __fl_common; ++ __le16 daddr; ++ __le16 saddr; ++ union flowi_uli uli; ++}; ++ ++struct flowi { ++ union { ++ struct flowi_common __fl_common; ++ struct flowi4 ip4; ++ struct flowi6 ip6; ++ struct flowidn dn; ++ } u; ++}; ++ ++struct prot_inuse; ++ ++struct netns_core { ++ struct ctl_table_header *sysctl_hdr; ++ int sysctl_somaxconn; ++ int *sock_inuse; ++ struct prot_inuse *prot_inuse; ++}; ++ ++struct u64_stats_sync {}; ++ ++struct ipstats_mib { ++ u64 mibs[37]; ++ struct u64_stats_sync syncp; ++}; ++ ++struct icmp_mib { ++ long unsigned int mibs[28]; ++}; ++ ++struct icmpmsg_mib { ++ atomic_long_t mibs[512]; ++}; ++ ++struct icmpv6_mib { ++ long unsigned int mibs[6]; ++}; ++ ++struct icmpv6_mib_device { ++ atomic_long_t mibs[6]; ++}; ++ ++struct icmpv6msg_mib { ++ atomic_long_t mibs[512]; ++}; ++ ++struct icmpv6msg_mib_device { ++ atomic_long_t mibs[512]; ++}; ++ ++struct tcp_mib { ++ long unsigned int mibs[16]; ++}; ++ ++struct udp_mib { ++ long unsigned int mibs[9]; ++}; ++ ++struct linux_mib { ++ long unsigned int mibs[119]; ++}; ++ ++struct linux_xfrm_mib { ++ long unsigned int mibs[29]; ++}; ++ ++struct proc_dir_entry; ++ ++struct netns_mib { ++ struct tcp_mib *tcp_statistics; ++ struct ipstats_mib *ip_statistics; ++ struct linux_mib *net_statistics; ++ struct udp_mib 
*udp_statistics; ++ struct udp_mib *udplite_statistics; ++ struct icmp_mib *icmp_statistics; ++ struct icmpmsg_mib *icmpmsg_statistics; ++ struct proc_dir_entry *proc_net_devsnmp6; ++ struct udp_mib *udp_stats_in6; ++ struct udp_mib *udplite_stats_in6; ++ struct ipstats_mib *ipv6_statistics; ++ struct icmpv6_mib *icmpv6_statistics; ++ struct icmpv6msg_mib *icmpv6msg_statistics; ++ struct linux_xfrm_mib *xfrm_statistics; ++}; ++ ++struct netns_unix { ++ int sysctl_max_dgram_qlen; ++ struct ctl_table_header *ctl; ++}; ++ ++struct netns_packet { ++ struct mutex sklist_lock; ++ struct hlist_head sklist; ++}; ++ ++struct rhash_head { ++ struct rhash_head *next; ++}; ++ ++struct rhashtable; ++ ++struct rhashtable_compare_arg { ++ struct rhashtable *ht; ++ const void *key; ++}; ++ ++typedef u32 (*rht_hashfn_t)(const void *, u32, u32); ++ ++typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); ++ ++typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); ++ ++struct rhashtable_params { ++ u16 nelem_hint; ++ u16 key_len; ++ u16 key_offset; ++ u16 head_offset; ++ unsigned int max_size; ++ u16 min_size; ++ bool automatic_shrinking; ++ u8 locks_mul; ++ rht_hashfn_t hashfn; ++ rht_obj_hashfn_t obj_hashfn; ++ rht_obj_cmpfn_t obj_cmpfn; ++}; ++ ++struct bucket_table; ++ ++struct rhashtable { ++ struct bucket_table *tbl; ++ unsigned int key_len; ++ unsigned int max_elems; ++ struct rhashtable_params p; ++ bool rhlist; ++ struct work_struct run_work; ++ struct mutex mutex; ++ spinlock_t lock; ++ atomic_t nelems; ++}; ++ ++struct inet_frags; ++ ++struct netns_frags { ++ long int high_thresh; ++ long int low_thresh; ++ int timeout; ++ int max_dist; ++ struct inet_frags *f; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct rhashtable rhashtable; ++ long: 64; ++ long: 64; ++ long: 64; ++ atomic_long_t mem; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct inet_frag_queue; ++ ++struct kmem_cache; ++ ++struct inet_frags { ++ unsigned int qsize; ++ void (*constructor)(struct inet_frag_queue *, const void *); ++ void (*destructor)(struct inet_frag_queue *); ++ void (*frag_expire)(struct timer_list *); ++ struct kmem_cache *frags_cachep; ++ const char *frags_cache_name; ++ struct rhashtable_params rhash_params; ++}; ++ ++struct frag_v4_compare_key { ++ __be32 saddr; ++ __be32 daddr; ++ u32 user; ++ u32 vif; ++ __be16 id; ++ u16 protocol; ++}; ++ ++struct frag_v6_compare_key { ++ struct in6_addr saddr; ++ struct in6_addr daddr; ++ u32 user; ++ __be32 id; ++ u32 iif; ++}; ++ ++struct sk_buff; ++ ++struct inet_frag_queue { ++ struct rhash_head node; ++ union { ++ struct frag_v4_compare_key v4; ++ struct frag_v6_compare_key v6; ++ } key; ++ struct timer_list timer; ++ spinlock_t lock; ++ refcount_t refcnt; ++ struct sk_buff *fragments; ++ struct rb_root rb_fragments; ++ struct sk_buff *fragments_tail; ++ struct sk_buff *last_run_head; ++ ktime_t stamp; ++ int len; ++ int meat; ++ __u8 flags; ++ u16 max_size; ++ struct netns_frags *net; ++ struct callback_head rcu; ++}; ++ ++typedef unsigned int sk_buff_data_t; ++ ++struct net_device; ++ ++struct sock; ++ ++struct sec_path; ++ ++struct nf_bridge_info; ++ ++struct sk_buff { ++ union { ++ struct { ++ struct sk_buff *next; ++ struct sk_buff *prev; ++ union { ++ struct net_device *dev; ++ long unsigned int dev_scratch; ++ }; ++ }; ++ struct rb_node rbnode; ++ struct list_head list; ++ }; ++ union { ++ struct sock *sk; ++ int ip_defrag_offset; ++ }; ++ union { ++ ktime_t tstamp; ++ 
u64 skb_mstamp; ++ }; ++ char cb[48]; ++ union { ++ struct { ++ long unsigned int _skb_refdst; ++ void (*destructor)(struct sk_buff *); ++ }; ++ struct list_head tcp_tsorted_anchor; ++ }; ++ struct sec_path *sp; ++ long unsigned int _nfct; ++ struct nf_bridge_info *nf_bridge; ++ unsigned int len; ++ unsigned int data_len; ++ __u16 mac_len; ++ __u16 hdr_len; ++ __u16 queue_mapping; ++ __u8 __cloned_offset[0]; ++ __u8 cloned: 1; ++ __u8 nohdr: 1; ++ __u8 fclone: 2; ++ __u8 peeked: 1; ++ __u8 head_frag: 1; ++ __u8 xmit_more: 1; ++ __u8 pfmemalloc: 1; ++ __u32 headers_start[0]; ++ __u8 __pkt_type_offset[0]; ++ __u8 pkt_type: 3; ++ __u8 ignore_df: 1; ++ __u8 nf_trace: 1; ++ __u8 ip_summed: 2; ++ __u8 ooo_okay: 1; ++ __u8 l4_hash: 1; ++ __u8 sw_hash: 1; ++ __u8 wifi_acked_valid: 1; ++ __u8 wifi_acked: 1; ++ __u8 no_fcs: 1; ++ __u8 encapsulation: 1; ++ __u8 encap_hdr_csum: 1; ++ __u8 csum_valid: 1; ++ __u8 csum_complete_sw: 1; ++ __u8 csum_level: 2; ++ __u8 csum_not_inet: 1; ++ __u8 dst_pending_confirm: 1; ++ __u8 ndisc_nodetype: 2; ++ __u8 ipvs_property: 1; ++ __u8 inner_protocol_type: 1; ++ __u8 remcsum_offload: 1; ++ __u8 offload_fwd_mark: 1; ++ __u8 offload_mr_fwd_mark: 1; ++ __u8 tc_skip_classify: 1; ++ __u8 tc_at_ingress: 1; ++ __u8 tc_redirected: 1; ++ __u8 tc_from_ingress: 1; ++ __u8 decrypted: 1; ++ __u16 tc_index; ++ union { ++ __wsum csum; ++ struct { ++ __u16 csum_start; ++ __u16 csum_offset; ++ }; ++ }; ++ __u32 priority; ++ int skb_iif; ++ __u32 hash; ++ __be16 vlan_proto; ++ __u16 vlan_tci; ++ union { ++ unsigned int napi_id; ++ unsigned int sender_cpu; ++ }; ++ __u32 secmark; ++ union { ++ __u32 mark; ++ __u32 reserved_tailroom; ++ }; ++ union { ++ __be16 inner_protocol; ++ __u8 inner_ipproto; ++ }; ++ __u16 inner_transport_header; ++ __u16 inner_network_header; ++ __u16 inner_mac_header; ++ __be16 protocol; ++ __u16 transport_header; ++ __u16 network_header; ++ __u16 mac_header; ++ __u32 headers_end[0]; ++ sk_buff_data_t tail; ++ sk_buff_data_t end; ++ unsigned char *head; ++ unsigned char *data; ++ unsigned int truesize; ++ refcount_t users; ++}; ++ ++struct local_ports { ++ seqlock_t lock; ++ int range[2]; ++ bool warned; ++}; ++ ++struct ping_group_range { ++ seqlock_t lock; ++ kgid_t range[2]; ++}; ++ ++struct inet_hashinfo; ++ ++struct inet_timewait_death_row { ++ atomic_t tw_count; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct inet_hashinfo *hashinfo; ++ int sysctl_max_tw_buckets; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ipv4_devconf; ++ ++struct ip_ra_chain; ++ ++struct fib_rules_ops; ++ ++struct fib_table; ++ ++struct inet_peer_base; ++ ++struct xt_table; ++ ++struct tcp_congestion_ops; ++ ++struct tcp_fastopen_context; ++ ++struct fib_notifier_ops; ++ ++struct netns_ipv4 { ++ struct ctl_table_header *forw_hdr; ++ struct ctl_table_header *frags_hdr; ++ struct ctl_table_header *ipv4_hdr; ++ struct ctl_table_header *route_hdr; ++ struct ctl_table_header *xfrm4_hdr; ++ struct ipv4_devconf *devconf_all; ++ struct ipv4_devconf *devconf_dflt; ++ struct ip_ra_chain *ra_chain; ++ struct mutex ra_mutex; ++ struct fib_rules_ops *rules_ops; ++ bool fib_has_custom_rules; ++ unsigned int fib_rules_require_fldissect; ++ struct fib_table *fib_main; ++ struct fib_table *fib_default; ++ bool fib_has_custom_local_routes; ++ int fib_num_tclassid_users; ++ struct hlist_head *fib_table_hash; ++ bool fib_offload_disabled; ++ struct sock *fibnl; ++ struct sock 
**icmp_sk; ++ struct sock *mc_autojoin_sk; ++ struct inet_peer_base *peers; ++ struct sock **tcp_sk; ++ struct netns_frags frags; ++ struct xt_table *iptable_filter; ++ struct xt_table *iptable_mangle; ++ struct xt_table *iptable_raw; ++ struct xt_table *arptable_filter; ++ struct xt_table *iptable_security; ++ struct xt_table *nat_table; ++ int sysctl_icmp_echo_ignore_all; ++ int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_ignore_bogus_error_responses; ++ int sysctl_icmp_ratelimit; ++ int sysctl_icmp_ratemask; ++ int sysctl_icmp_errors_use_inbound_ifaddr; ++ struct local_ports ip_local_ports; ++ int sysctl_tcp_ecn; ++ int sysctl_tcp_ecn_fallback; ++ int sysctl_ip_default_ttl; ++ int sysctl_ip_no_pmtu_disc; ++ int sysctl_ip_fwd_use_pmtu; ++ int sysctl_ip_fwd_update_priority; ++ int sysctl_ip_nonlocal_bind; ++ int sysctl_ip_dynaddr; ++ int sysctl_ip_early_demux; ++ int sysctl_raw_l3mdev_accept; ++ int sysctl_tcp_early_demux; ++ int sysctl_udp_early_demux; ++ int sysctl_fwmark_reflect; ++ int sysctl_tcp_fwmark_accept; ++ int sysctl_tcp_l3mdev_accept; ++ int sysctl_tcp_mtu_probing; ++ int sysctl_tcp_base_mss; ++ int sysctl_tcp_min_snd_mss; ++ int sysctl_tcp_probe_threshold; ++ u32 sysctl_tcp_probe_interval; ++ int sysctl_tcp_keepalive_time; ++ int sysctl_tcp_keepalive_probes; ++ int sysctl_tcp_keepalive_intvl; ++ int sysctl_tcp_syn_retries; ++ int sysctl_tcp_synack_retries; ++ int sysctl_tcp_syncookies; ++ int sysctl_tcp_reordering; ++ int sysctl_tcp_retries1; ++ int sysctl_tcp_retries2; ++ int sysctl_tcp_orphan_retries; ++ int sysctl_tcp_fin_timeout; ++ unsigned int sysctl_tcp_notsent_lowat; ++ int sysctl_tcp_tw_reuse; ++ int sysctl_tcp_sack; ++ int sysctl_tcp_window_scaling; ++ int sysctl_tcp_timestamps; ++ int sysctl_tcp_early_retrans; ++ int sysctl_tcp_recovery; ++ int sysctl_tcp_thin_linear_timeouts; ++ int sysctl_tcp_slow_start_after_idle; ++ int sysctl_tcp_retrans_collapse; ++ int sysctl_tcp_stdurg; ++ int sysctl_tcp_rfc1337; ++ int sysctl_tcp_abort_on_overflow; ++ int sysctl_tcp_fack; ++ int sysctl_tcp_max_reordering; ++ int sysctl_tcp_dsack; ++ int sysctl_tcp_app_win; ++ int sysctl_tcp_adv_win_scale; ++ int sysctl_tcp_frto; ++ int sysctl_tcp_nometrics_save; ++ int sysctl_tcp_moderate_rcvbuf; ++ int sysctl_tcp_tso_win_divisor; ++ int sysctl_tcp_workaround_signed_windows; ++ int sysctl_tcp_limit_output_bytes; ++ int sysctl_tcp_challenge_ack_limit; ++ int sysctl_tcp_min_tso_segs; ++ int sysctl_tcp_min_rtt_wlen; ++ int sysctl_tcp_autocorking; ++ int sysctl_tcp_invalid_ratelimit; ++ int sysctl_tcp_pacing_ss_ratio; ++ int sysctl_tcp_pacing_ca_ratio; ++ int sysctl_tcp_wmem[3]; ++ int sysctl_tcp_rmem[3]; ++ int sysctl_tcp_comp_sack_nr; ++ long unsigned int sysctl_tcp_comp_sack_delay_ns; ++ long: 64; ++ struct inet_timewait_death_row tcp_death_row; ++ int sysctl_max_syn_backlog; ++ int sysctl_tcp_fastopen; ++ const struct tcp_congestion_ops *tcp_congestion_control; ++ struct tcp_fastopen_context *tcp_fastopen_ctx; ++ spinlock_t tcp_fastopen_ctx_lock; ++ unsigned int sysctl_tcp_fastopen_blackhole_timeout; ++ atomic_t tfo_active_disable_times; ++ long unsigned int tfo_active_disable_stamp; ++ int sysctl_udp_wmem_min; ++ int sysctl_udp_rmem_min; ++ int sysctl_udp_l3mdev_accept; ++ int sysctl_igmp_max_memberships; ++ int sysctl_igmp_max_msf; ++ int sysctl_igmp_llm_reports; ++ int sysctl_igmp_qrv; ++ struct ping_group_range ping_group_range; ++ atomic_t dev_addr_genid; ++ long unsigned int *sysctl_local_reserved_ports; ++ int sysctl_ip_prot_sock; ++ struct list_head mr_tables; ++ struct 
fib_rules_ops *mr_rules_ops; ++ int sysctl_fib_multipath_use_neigh; ++ int sysctl_fib_multipath_hash_policy; ++ struct fib_notifier_ops *notifier_ops; ++ unsigned int fib_seq; ++ struct fib_notifier_ops *ipmr_notifier_ops; ++ unsigned int ipmr_seq; ++ atomic_t rt_genid; ++ siphash_key_t ip_id_key; ++}; ++ ++struct fib_rule; ++ ++struct fib_lookup_arg; ++ ++struct fib_rule_hdr; ++ ++struct nlattr; ++ ++struct netlink_ext_ack; ++ ++struct nla_policy; ++ ++struct net; ++ ++struct fib_rules_ops { ++ int family; ++ struct list_head list; ++ int rule_size; ++ int addr_size; ++ int unresolved_rules; ++ int nr_goto_rules; ++ unsigned int fib_rules_seq; ++ int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); ++ bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); ++ int (*match)(struct fib_rule *, struct flowi *, int); ++ int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, struct netlink_ext_ack *); ++ int (*delete)(struct fib_rule *); ++ int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); ++ int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); ++ size_t (*nlmsg_payload)(struct fib_rule *); ++ void (*flush_cache)(struct fib_rules_ops *); ++ int nlgroup; ++ const struct nla_policy *policy; ++ struct list_head rules_list; ++ struct module *owner; ++ struct net *fro_net; ++ struct callback_head rcu; ++}; ++ ++struct fib_table { ++ struct hlist_node tb_hlist; ++ u32 tb_id; ++ int tb_num_default; ++ struct callback_head rcu; ++ long unsigned int *tb_data; ++ long unsigned int __data[0]; ++}; ++ ++typedef __u64 __addrpair; ++ ++typedef __u32 __portpair; ++ ++typedef struct { ++ struct net *net; ++} possible_net_t; ++ ++struct hlist_nulls_node { ++ struct hlist_nulls_node *next; ++ struct hlist_nulls_node **pprev; ++}; ++ ++struct proto; ++ ++struct sock_common { ++ union { ++ __addrpair skc_addrpair; ++ struct { ++ __be32 skc_daddr; ++ __be32 skc_rcv_saddr; ++ }; ++ }; ++ union { ++ unsigned int skc_hash; ++ __u16 skc_u16hashes[2]; ++ }; ++ union { ++ __portpair skc_portpair; ++ struct { ++ __be16 skc_dport; ++ __u16 skc_num; ++ }; ++ }; ++ short unsigned int skc_family; ++ volatile unsigned char skc_state; ++ unsigned char skc_reuse: 4; ++ unsigned char skc_reuseport: 1; ++ unsigned char skc_ipv6only: 1; ++ unsigned char skc_net_refcnt: 1; ++ int skc_bound_dev_if; ++ union { ++ struct hlist_node skc_bind_node; ++ struct hlist_node skc_portaddr_node; ++ }; ++ struct proto *skc_prot; ++ possible_net_t skc_net; ++ struct in6_addr skc_v6_daddr; ++ struct in6_addr skc_v6_rcv_saddr; ++ atomic64_t skc_cookie; ++ union { ++ long unsigned int skc_flags; ++ struct sock *skc_listener; ++ struct inet_timewait_death_row *skc_tw_dr; ++ }; ++ int skc_dontcopy_begin[0]; ++ union { ++ struct hlist_node skc_node; ++ struct hlist_nulls_node skc_nulls_node; ++ }; ++ short unsigned int skc_tx_queue_mapping; ++ short unsigned int skc_rx_queue_mapping; ++ union { ++ int skc_incoming_cpu; ++ u32 skc_rcv_wnd; ++ u32 skc_tw_rcv_nxt; ++ }; ++ refcount_t skc_refcnt; ++ int skc_dontcopy_end[0]; ++ union { ++ u32 skc_rxhash; ++ u32 skc_window_clamp; ++ u32 skc_tw_snd_nxt; ++ }; ++}; ++ ++typedef struct { ++ spinlock_t slock; ++ int owned; ++ wait_queue_head_t wq; ++} socket_lock_t; ++ ++struct sk_buff_head { ++ struct sk_buff *next; ++ struct sk_buff *prev; ++ __u32 qlen; ++ spinlock_t lock; ++}; ++ ++typedef u64 netdev_features_t; ++ ++struct sock_cgroup_data { ++ union { ++ struct { ++ u8 is_data; ++ u8 
padding; ++ u16 prioidx; ++ u32 classid; ++ }; ++ u64 val; ++ }; ++}; ++ ++struct sk_filter; ++ ++struct socket_wq; ++ ++struct xfrm_policy; ++ ++struct dst_entry; ++ ++struct socket; ++ ++struct sock_reuseport; ++ ++struct sock { ++ struct sock_common __sk_common; ++ socket_lock_t sk_lock; ++ atomic_t sk_drops; ++ int sk_rcvlowat; ++ struct sk_buff_head sk_error_queue; ++ struct sk_buff_head sk_receive_queue; ++ struct { ++ atomic_t rmem_alloc; ++ int len; ++ struct sk_buff *head; ++ struct sk_buff *tail; ++ } sk_backlog; ++ int sk_forward_alloc; ++ unsigned int sk_ll_usec; ++ unsigned int sk_napi_id; ++ int sk_rcvbuf; ++ struct sk_filter *sk_filter; ++ union { ++ struct socket_wq *sk_wq; ++ struct socket_wq *sk_wq_raw; ++ }; ++ struct xfrm_policy *sk_policy[2]; ++ struct dst_entry *sk_rx_dst; ++ struct dst_entry *sk_dst_cache; ++ atomic_t sk_omem_alloc; ++ int sk_sndbuf; ++ int sk_wmem_queued; ++ refcount_t sk_wmem_alloc; ++ long unsigned int sk_tsq_flags; ++ union { ++ struct sk_buff *sk_send_head; ++ struct rb_root tcp_rtx_queue; ++ }; ++ struct sk_buff_head sk_write_queue; ++ __s32 sk_peek_off; ++ int sk_write_pending; ++ __u32 sk_dst_pending_confirm; ++ u32 sk_pacing_status; ++ long int sk_sndtimeo; ++ struct timer_list sk_timer; ++ __u32 sk_priority; ++ __u32 sk_mark; ++ u32 sk_pacing_rate; ++ u32 sk_max_pacing_rate; ++ struct page_frag sk_frag; ++ netdev_features_t sk_route_caps; ++ netdev_features_t sk_route_nocaps; ++ netdev_features_t sk_route_forced_caps; ++ int sk_gso_type; ++ unsigned int sk_gso_max_size; ++ gfp_t sk_allocation; ++ __u32 sk_txhash; ++ unsigned int __sk_flags_offset[0]; ++ unsigned int sk_padding: 1; ++ unsigned int sk_kern_sock: 1; ++ unsigned int sk_no_check_tx: 1; ++ unsigned int sk_no_check_rx: 1; ++ unsigned int sk_userlocks: 4; ++ unsigned int sk_protocol: 8; ++ unsigned int sk_type: 16; ++ u16 sk_gso_max_segs; ++ u8 sk_pacing_shift; ++ long unsigned int sk_lingertime; ++ struct proto *sk_prot_creator; ++ rwlock_t sk_callback_lock; ++ int sk_err; ++ int sk_err_soft; ++ u32 sk_ack_backlog; ++ u32 sk_max_ack_backlog; ++ kuid_t sk_uid; ++ struct pid *sk_peer_pid; ++ const struct cred *sk_peer_cred; ++ long int sk_rcvtimeo; ++ ktime_t sk_stamp; ++ u16 sk_tsflags; ++ u8 sk_shutdown; ++ u32 sk_tskey; ++ atomic_t sk_zckey; ++ u8 sk_clockid; ++ u8 sk_txtime_deadline_mode: 1; ++ u8 sk_txtime_report_errors: 1; ++ u8 sk_txtime_unused: 6; ++ struct socket *sk_socket; ++ void *sk_user_data; ++ void *sk_security; ++ struct sock_cgroup_data sk_cgrp_data; ++ struct mem_cgroup *sk_memcg; ++ void (*sk_state_change)(struct sock *); ++ void (*sk_data_ready)(struct sock *); ++ void (*sk_write_space)(struct sock *); ++ void (*sk_error_report)(struct sock *); ++ int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); ++ struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *); ++ void (*sk_destruct)(struct sock *); ++ struct sock_reuseport *sk_reuseport_cb; ++ struct callback_head sk_rcu; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; 
++ long unsigned int kabi_reserved16; ++}; ++ ++struct inet_peer_base { ++ struct rb_root rb_root; ++ seqlock_t lock; ++ int total; ++}; ++ ++struct xt_table_info; ++ ++struct xt_table { ++ struct list_head list; ++ unsigned int valid_hooks; ++ struct xt_table_info *private; ++ struct module *me; ++ u_int8_t af; ++ int priority; ++ int (*table_init)(struct net *); ++ const char name[32]; ++}; ++ ++enum tcp_ca_event { ++ CA_EVENT_TX_START = 0, ++ CA_EVENT_CWND_RESTART = 1, ++ CA_EVENT_COMPLETE_CWR = 2, ++ CA_EVENT_LOSS = 3, ++ CA_EVENT_ECN_NO_CE = 4, ++ CA_EVENT_ECN_IS_CE = 5, ++}; ++ ++struct ack_sample; ++ ++struct rate_sample; ++ ++union tcp_cc_info; ++ ++struct tcp_congestion_ops { ++ struct list_head list; ++ u32 key; ++ u32 flags; ++ void (*init)(struct sock *); ++ void (*release)(struct sock *); ++ u32 (*ssthresh)(struct sock *); ++ void (*cong_avoid)(struct sock *, u32, u32); ++ void (*set_state)(struct sock *, u8); ++ void (*cwnd_event)(struct sock *, enum tcp_ca_event); ++ void (*in_ack_event)(struct sock *, u32); ++ u32 (*undo_cwnd)(struct sock *); ++ void (*pkts_acked)(struct sock *, const struct ack_sample *); ++ u32 (*min_tso_segs)(struct sock *); ++ u32 (*sndbuf_expand)(struct sock *); ++ void (*cong_control)(struct sock *, const struct rate_sample *); ++ size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); ++ char name[16]; ++ struct module *owner; ++}; ++ ++struct fib_notifier_ops { ++ int family; ++ struct list_head list; ++ unsigned int (*fib_seq_read)(struct net *); ++ int (*fib_dump)(struct net *, struct notifier_block *); ++ struct module *owner; ++ struct callback_head rcu; ++}; ++ ++typedef u64 pteval_t; ++ ++typedef u64 pmdval_t; ++ ++typedef u64 pgdval_t; ++ ++typedef struct { ++ pteval_t pte; ++} pte_t; ++ ++typedef struct { ++ pmdval_t pmd; ++} pmd_t; ++ ++typedef struct { ++ pgdval_t pgd; ++} pgd_t; ++ ++typedef struct { ++ pteval_t pgprot; ++} pgprot_t; ++ ++typedef struct { ++ pgd_t pgd; ++} pud_t; ++ ++typedef struct page *pgtable_t; ++ ++struct address_space; ++ ++struct dev_pagemap; ++ ++struct page { ++ long unsigned int flags; ++ union { ++ struct { ++ struct list_head lru; ++ struct address_space *mapping; ++ long unsigned int index; ++ long unsigned int private; ++ }; ++ struct { ++ union { ++ struct list_head slab_list; ++ struct { ++ struct page *next; ++ int pages; ++ int pobjects; ++ }; ++ }; ++ struct kmem_cache *slab_cache; ++ void *freelist; ++ union { ++ void *s_mem; ++ long unsigned int counters; ++ struct { ++ unsigned int inuse: 16; ++ unsigned int objects: 15; ++ unsigned int frozen: 1; ++ }; ++ }; ++ }; ++ struct { ++ long unsigned int compound_head; ++ unsigned char compound_dtor; ++ unsigned char compound_order; ++ atomic_t compound_mapcount; ++ }; ++ struct { ++ long unsigned int _compound_pad_1; ++ long unsigned int _compound_pad_2; ++ struct list_head deferred_list; ++ }; ++ struct { ++ long unsigned int _pt_pad_1; ++ pgtable_t pmd_huge_pte; ++ long unsigned int _pt_pad_2; ++ union { ++ struct mm_struct *pt_mm; ++ atomic_t pt_frag_refcount; ++ }; ++ spinlock_t ptl; ++ }; ++ struct { ++ struct dev_pagemap *pgmap; ++ long unsigned int hmm_data; ++ long unsigned int _zd_pad_1; ++ }; ++ struct callback_head callback_head; ++ }; ++ union { ++ atomic_t _mapcount; ++ unsigned int page_type; ++ unsigned int active; ++ int units; ++ }; ++ atomic_t _refcount; ++ struct mem_cgroup *mem_cgroup; ++}; ++ ++struct free_area { ++ struct list_head free_list[6]; ++ long unsigned int nr_free; ++}; ++ ++struct zone_padding { ++ char x[0]; 
++}; ++ ++struct zone_reclaim_stat { ++ long unsigned int recent_rotated[2]; ++ long unsigned int recent_scanned[2]; ++}; ++ ++struct pglist_data; ++ ++struct lruvec { ++ struct list_head lists[5]; ++ struct zone_reclaim_stat reclaim_stat; ++ atomic_long_t inactive_age; ++ long unsigned int refaults; ++ struct pglist_data *pgdat; ++}; ++ ++struct per_cpu_pageset; ++ ++struct zone { ++ long unsigned int watermark[3]; ++ long unsigned int nr_reserved_highatomic; ++ long int lowmem_reserve[3]; ++ int node; ++ struct pglist_data *zone_pgdat; ++ struct per_cpu_pageset *pageset; ++ long unsigned int zone_start_pfn; ++ long unsigned int managed_pages; ++ long unsigned int spanned_pages; ++ long unsigned int present_pages; ++ const char *name; ++ long unsigned int nr_isolate_pageblock; ++ seqlock_t span_seqlock; ++ int initialized; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad1_; ++ struct free_area free_area[14]; ++ long unsigned int flags; ++ spinlock_t lock; ++ int: 32; ++ struct zone_padding _pad2_; ++ long unsigned int percpu_drift_mark; ++ long unsigned int compact_cached_free_pfn; ++ long unsigned int compact_cached_migrate_pfn[2]; ++ unsigned int compact_considered; ++ unsigned int compact_defer_shift; ++ int compact_order_failed; ++ bool compact_blockskip_flush; ++ bool contiguous; ++ long: 16; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad3_; ++ atomic_long_t vm_stat[13]; ++ atomic_long_t vm_numa_stat[6]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct zoneref { ++ struct zone *zone; ++ int zone_idx; ++}; ++ ++struct zonelist { ++ struct zoneref _zonerefs[49]; ++}; ++ ++enum zone_type { ++ ZONE_DMA32 = 0, ++ ZONE_NORMAL = 1, ++ ZONE_MOVABLE = 2, ++ __MAX_NR_ZONES = 3, ++}; ++ ++struct per_cpu_nodestat; ++ ++struct pglist_data { ++ struct zone node_zones[3]; ++ struct zonelist node_zonelists[2]; ++ int nr_zones; ++ spinlock_t node_size_lock; ++ long unsigned int node_start_pfn; ++ long unsigned int node_present_pages; ++ long unsigned int node_spanned_pages; ++ int node_id; ++ wait_queue_head_t kswapd_wait; ++ wait_queue_head_t pfmemalloc_wait; ++ struct task_struct *kswapd; ++ int kswapd_order; ++ enum zone_type kswapd_classzone_idx; ++ int kswapd_failures; ++ int kcompactd_max_order; ++ enum zone_type kcompactd_classzone_idx; ++ wait_queue_head_t kcompactd_wait; ++ struct task_struct *kcompactd; ++ long unsigned int totalreserve_pages; ++ long unsigned int min_unmapped_pages; ++ long unsigned int min_slab_pages; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad1_; ++ spinlock_t lru_lock; ++ spinlock_t split_queue_lock; ++ struct list_head split_queue; ++ long unsigned int split_queue_len; ++ struct lruvec lruvec; ++ long unsigned int flags; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad2_; ++ struct per_cpu_nodestat *per_cpu_nodestats; ++ atomic_long_t vm_stat[28]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++}; ++ ++typedef unsigned int isolate_mode_t; ++ ++struct per_cpu_pages { ++ int count; ++ int high; ++ int batch; ++ struct list_head lists[3]; ++}; ++ ++struct per_cpu_pageset { ++ struct per_cpu_pages pcp; ++ s8 expire; ++ u16 vm_numa_stat_diff[6]; ++ s8 stat_threshold; ++ s8 vm_stat_diff[13]; ++}; ++ ++struct per_cpu_nodestat { ++ s8 stat_threshold; ++ s8 vm_node_stat_diff[28]; ++}; ++ ++typedef struct pglist_data 
pg_data_t; ++ ++typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); ++ ++struct notifier_block { ++ notifier_fn_t notifier_call; ++ struct notifier_block *next; ++ int priority; ++}; ++ ++struct blocking_notifier_head { ++ struct rw_semaphore rwsem; ++ struct notifier_block *head; ++}; ++ ++struct cpu_topology { ++ int thread_id; ++ int core_id; ++ int package_id; ++ int llc_id; ++ cpumask_t thread_sibling; ++ cpumask_t core_sibling; ++ cpumask_t llc_sibling; ++}; ++ ++struct mem_section { ++ long unsigned int section_mem_map; ++ long unsigned int *pageblock_flags; ++}; ++ ++struct percpu_counter { ++ raw_spinlock_t lock; ++ s64 count; ++ struct list_head list; ++ s32 *counters; ++}; ++ ++struct neighbour; ++ ++struct dst_ops { ++ short unsigned int family; ++ unsigned int gc_thresh; ++ int (*gc)(struct dst_ops *); ++ struct dst_entry * (*check)(struct dst_entry *, __u32); ++ unsigned int (*default_advmss)(const struct dst_entry *); ++ unsigned int (*mtu)(const struct dst_entry *); ++ u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); ++ void (*destroy)(struct dst_entry *); ++ void (*ifdown)(struct dst_entry *, struct net_device *, int); ++ struct dst_entry * (*negative_advice)(struct dst_entry *); ++ void (*link_failure)(struct sk_buff *); ++ void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); ++ void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); ++ int (*local_out)(struct net *, struct sock *, struct sk_buff *); ++ struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); ++ void (*confirm_neigh)(const struct dst_entry *, const void *); ++ struct kmem_cache *kmem_cachep; ++ struct percpu_counter pcpuc_entries; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct xfrm_state; ++ ++struct lwtunnel_state; ++ ++struct dst_entry { ++ struct net_device *dev; ++ struct dst_ops *ops; ++ long unsigned int _metrics; ++ long unsigned int expires; ++ struct xfrm_state *xfrm; ++ int (*input)(struct sk_buff *); ++ int (*output)(struct net *, struct sock *, struct sk_buff *); ++ short unsigned int flags; ++ short int obsolete; ++ short unsigned int header_len; ++ short unsigned int trailer_len; ++ atomic_t __refcnt; ++ int __use; ++ long unsigned int lastuse; ++ struct lwtunnel_state *lwtstate; ++ struct callback_head callback_head; ++ short int error; ++ short int __pad; ++ __u32 tclassid; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++}; ++ ++struct net_device_stats { ++ long unsigned int rx_packets; ++ long unsigned int tx_packets; ++ long unsigned int rx_bytes; ++ long unsigned int tx_bytes; ++ long unsigned int rx_errors; ++ long unsigned int tx_errors; ++ long unsigned int rx_dropped; ++ long unsigned int tx_dropped; ++ long unsigned int multicast; ++ long unsigned int collisions; ++ long unsigned int rx_length_errors; ++ long unsigned int rx_over_errors; ++ 
long unsigned int rx_crc_errors; ++ long unsigned int rx_frame_errors; ++ long unsigned int rx_fifo_errors; ++ long unsigned int rx_missed_errors; ++ long unsigned int tx_aborted_errors; ++ long unsigned int tx_carrier_errors; ++ long unsigned int tx_fifo_errors; ++ long unsigned int tx_heartbeat_errors; ++ long unsigned int tx_window_errors; ++ long unsigned int rx_compressed; ++ long unsigned int tx_compressed; ++}; ++ ++struct netdev_hw_addr_list { ++ struct list_head list; ++ int count; ++}; ++ ++struct tipc_bearer; ++ ++enum rx_handler_result { ++ RX_HANDLER_CONSUMED = 0, ++ RX_HANDLER_ANOTHER = 1, ++ RX_HANDLER_EXACT = 2, ++ RX_HANDLER_PASS = 3, ++}; ++ ++typedef enum rx_handler_result rx_handler_result_t; ++ ++typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); ++ ++struct pcpu_dstats; ++ ++struct pcpu_vstats; ++ ++struct garp_port; ++ ++struct mrp_port; ++ ++enum dl_dev_state { ++ DL_DEV_NO_DRIVER = 0, ++ DL_DEV_PROBING = 1, ++ DL_DEV_DRIVER_BOUND = 2, ++ DL_DEV_UNBINDING = 3, ++}; ++ ++struct dev_links_info { ++ struct list_head suppliers; ++ struct list_head consumers; ++ enum dl_dev_state status; ++}; ++ ++struct pm_message { ++ int event; ++}; ++ ++typedef struct pm_message pm_message_t; ++ ++enum rpm_request { ++ RPM_REQ_NONE = 0, ++ RPM_REQ_IDLE = 1, ++ RPM_REQ_SUSPEND = 2, ++ RPM_REQ_AUTOSUSPEND = 3, ++ RPM_REQ_RESUME = 4, ++}; ++ ++enum rpm_status { ++ RPM_ACTIVE = 0, ++ RPM_RESUMING = 1, ++ RPM_SUSPENDED = 2, ++ RPM_SUSPENDING = 3, ++}; ++ ++struct wakeup_source; ++ ++struct wake_irq; ++ ++struct pm_subsys_data; ++ ++struct device; ++ ++struct dev_pm_qos; ++ ++struct dev_pm_info { ++ pm_message_t power_state; ++ unsigned int can_wakeup: 1; ++ unsigned int async_suspend: 1; ++ bool in_dpm_list: 1; ++ bool is_prepared: 1; ++ bool is_suspended: 1; ++ bool is_noirq_suspended: 1; ++ bool is_late_suspended: 1; ++ bool early_init: 1; ++ bool direct_complete: 1; ++ u32 driver_flags; ++ spinlock_t lock; ++ struct list_head entry; ++ struct completion completion; ++ struct wakeup_source *wakeup; ++ bool wakeup_path: 1; ++ bool syscore: 1; ++ bool no_pm_callbacks: 1; ++ unsigned int must_resume: 1; ++ unsigned int may_skip_resume: 1; ++ struct timer_list suspend_timer; ++ long unsigned int timer_expires; ++ struct work_struct work; ++ wait_queue_head_t wait_queue; ++ struct wake_irq *wakeirq; ++ atomic_t usage_count; ++ atomic_t child_count; ++ unsigned int disable_depth: 3; ++ unsigned int idle_notification: 1; ++ unsigned int request_pending: 1; ++ unsigned int deferred_resume: 1; ++ unsigned int runtime_auto: 1; ++ bool ignore_children: 1; ++ unsigned int no_callbacks: 1; ++ unsigned int irq_safe: 1; ++ unsigned int use_autosuspend: 1; ++ unsigned int timer_autosuspends: 1; ++ unsigned int memalloc_noio: 1; ++ unsigned int links_count; ++ enum rpm_request request; ++ enum rpm_status runtime_status; ++ int runtime_error; ++ int autosuspend_delay; ++ long unsigned int last_busy; ++ long unsigned int active_jiffies; ++ long unsigned int suspended_jiffies; ++ long unsigned int accounting_timestamp; ++ struct pm_subsys_data *subsys_data; ++ void (*set_latency_tolerance)(struct device *, s32); ++ struct dev_pm_qos *qos; ++}; ++ ++struct dev_archdata { ++ void *iommu; ++ bool dma_coherent; ++}; ++ ++struct klist_node { ++ void *n_klist; ++ struct list_head n_node; ++ struct kref n_ref; ++}; ++ ++struct device_private; ++ ++struct device_type; ++ ++struct bus_type; ++ ++struct device_driver; ++ ++struct dev_pm_domain; ++ ++struct irq_domain; ++ ++struct dev_pin_info; ++ 
++struct dma_map_ops; ++ ++struct device_dma_parameters; ++ ++struct dma_coherent_mem; ++ ++struct cma; ++ ++struct device_node; ++ ++struct fwnode_handle; ++ ++struct class; ++ ++struct attribute_group; ++ ++struct iommu_group; ++ ++struct iommu_fwspec; ++ ++struct iommu_param; ++ ++struct device { ++ struct device *parent; ++ struct device_private *p; ++ struct kobject kobj; ++ const char *init_name; ++ const struct device_type *type; ++ struct mutex mutex; ++ struct bus_type *bus; ++ struct device_driver *driver; ++ void *platform_data; ++ void *driver_data; ++ struct dev_links_info links; ++ struct dev_pm_info power; ++ struct dev_pm_domain *pm_domain; ++ struct irq_domain *msi_domain; ++ struct dev_pin_info *pins; ++ struct list_head msi_list; ++ int numa_node; ++ const struct dma_map_ops *dma_ops; ++ u64 *dma_mask; ++ u64 coherent_dma_mask; ++ u64 bus_dma_mask; ++ long unsigned int dma_pfn_offset; ++ struct device_dma_parameters *dma_parms; ++ struct list_head dma_pools; ++ struct dma_coherent_mem *dma_mem; ++ struct cma *cma_area; ++ struct dev_archdata archdata; ++ struct device_node *of_node; ++ struct fwnode_handle *fwnode; ++ dev_t devt; ++ u32 id; ++ spinlock_t devres_lock; ++ struct list_head devres_head; ++ struct klist_node knode_class; ++ struct class *class; ++ const struct attribute_group **groups; ++ void (*release)(struct device *); ++ struct iommu_group *iommu_group; ++ struct iommu_fwspec *iommu_fwspec; ++ struct iommu_param *iommu_param; ++ bool offline_disabled: 1; ++ bool offline: 1; ++ bool of_node_reused: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++}; ++ ++struct netdev_tc_txq { ++ u16 count; ++ u16 offset; ++}; ++ ++struct sfp_bus; ++ ++struct dev_ifalias; ++ ++struct net_device_ops; ++ ++struct ethtool_ops; ++ ++struct switchdev_ops; ++ ++struct l3mdev_ops; ++ ++struct ndisc_ops; ++ ++struct xfrmdev_ops; ++ ++struct tlsdev_ops; ++ ++struct header_ops; ++ ++struct vlan_info; ++ ++struct in_device; ++ ++struct inet6_dev; ++ ++struct wireless_dev; ++ ++struct wpan_dev; ++ ++struct netdev_rx_queue; ++ ++struct bpf_prog; ++ ++struct mini_Qdisc; ++ ++struct netdev_queue; ++ ++struct nf_hook_entries; ++ ++struct cpu_rmap; ++ ++struct Qdisc; ++ ++struct xps_dev_maps; ++ ++struct netpoll_info; ++ ++struct pcpu_lstats; ++ ++struct pcpu_sw_netstats; ++ ++struct rtnl_link_ops; ++ ++struct dcbnl_rtnl_ops; ++ ++struct netprio_map; ++ ++struct phy_device; ++ ++struct net_device { ++ char name[16]; ++ struct hlist_node name_hlist; ++ struct dev_ifalias *ifalias; ++ long unsigned int mem_end; ++ long unsigned int mem_start; ++ long unsigned int base_addr; ++ int irq; ++ long unsigned int state; ++ struct list_head dev_list; ++ struct list_head napi_list; ++ struct list_head unreg_list; ++ struct list_head close_list; ++ struct list_head ptype_all; ++ struct list_head ptype_specific; ++ struct { ++ struct list_head upper; ++ struct list_head lower; ++ } adj_list; ++ netdev_features_t features; ++ netdev_features_t hw_features; ++ 
netdev_features_t wanted_features; ++ netdev_features_t vlan_features; ++ netdev_features_t hw_enc_features; ++ netdev_features_t mpls_features; ++ netdev_features_t gso_partial_features; ++ int ifindex; ++ int group; ++ struct net_device_stats stats; ++ atomic_long_t rx_dropped; ++ atomic_long_t tx_dropped; ++ atomic_long_t rx_nohandler; ++ atomic_t carrier_up_count; ++ atomic_t carrier_down_count; ++ const struct net_device_ops *netdev_ops; ++ const struct ethtool_ops *ethtool_ops; ++ const struct switchdev_ops *switchdev_ops; ++ const struct l3mdev_ops *l3mdev_ops; ++ const struct ndisc_ops *ndisc_ops; ++ const struct xfrmdev_ops *xfrmdev_ops; ++ const struct tlsdev_ops *tlsdev_ops; ++ const struct header_ops *header_ops; ++ unsigned int flags; ++ unsigned int priv_flags; ++ short unsigned int gflags; ++ short unsigned int padded; ++ unsigned char operstate; ++ unsigned char link_mode; ++ unsigned char if_port; ++ unsigned char dma; ++ unsigned int mtu; ++ unsigned int min_mtu; ++ unsigned int max_mtu; ++ short unsigned int type; ++ short unsigned int hard_header_len; ++ unsigned char min_header_len; ++ short unsigned int needed_headroom; ++ short unsigned int needed_tailroom; ++ unsigned char perm_addr[32]; ++ unsigned char addr_assign_type; ++ unsigned char addr_len; ++ unsigned char upper_level; ++ unsigned char lower_level; ++ short unsigned int neigh_priv_len; ++ short unsigned int dev_id; ++ short unsigned int dev_port; ++ spinlock_t addr_list_lock; ++ unsigned char name_assign_type; ++ bool uc_promisc; ++ struct netdev_hw_addr_list uc; ++ struct netdev_hw_addr_list mc; ++ struct netdev_hw_addr_list dev_addrs; ++ struct kset *queues_kset; ++ unsigned int promiscuity; ++ unsigned int allmulti; ++ struct vlan_info *vlan_info; ++ struct tipc_bearer *tipc_ptr; ++ struct in_device *ip_ptr; ++ struct inet6_dev *ip6_ptr; ++ struct wireless_dev *ieee80211_ptr; ++ struct wpan_dev *ieee802154_ptr; ++ unsigned char *dev_addr; ++ struct netdev_rx_queue *_rx; ++ unsigned int num_rx_queues; ++ unsigned int real_num_rx_queues; ++ struct bpf_prog *xdp_prog; ++ long unsigned int gro_flush_timeout; ++ rx_handler_func_t *rx_handler; ++ void *rx_handler_data; ++ struct mini_Qdisc *miniq_ingress; ++ struct netdev_queue *ingress_queue; ++ struct nf_hook_entries *nf_hooks_ingress; ++ unsigned char broadcast[32]; ++ struct cpu_rmap *rx_cpu_rmap; ++ struct hlist_node index_hlist; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netdev_queue *_tx; ++ unsigned int num_tx_queues; ++ unsigned int real_num_tx_queues; ++ struct Qdisc *qdisc; ++ struct hlist_head qdisc_hash[16]; ++ unsigned int tx_queue_len; ++ spinlock_t tx_global_lock; ++ int watchdog_timeo; ++ struct xps_dev_maps *xps_cpus_map; ++ struct xps_dev_maps *xps_rxqs_map; ++ struct mini_Qdisc *miniq_egress; ++ struct timer_list watchdog_timer; ++ int *pcpu_refcnt; ++ struct list_head todo_list; ++ struct list_head link_watch_list; ++ enum { ++ NETREG_UNINITIALIZED = 0, ++ NETREG_REGISTERED = 1, ++ NETREG_UNREGISTERING = 2, ++ NETREG_UNREGISTERED = 3, ++ NETREG_RELEASED = 4, ++ NETREG_DUMMY = 5, ++ } reg_state: 8; ++ bool dismantle; ++ enum { ++ RTNL_LINK_INITIALIZED = 0, ++ RTNL_LINK_INITIALIZING = 1, ++ } rtnl_link_state: 16; ++ bool needs_free_netdev; ++ void (*priv_destructor)(struct net_device *); ++ struct netpoll_info *npinfo; ++ possible_net_t nd_net; ++ union { ++ void *ml_priv; ++ struct pcpu_lstats *lstats; ++ struct pcpu_sw_netstats *tstats; ++ struct pcpu_dstats *dstats; ++ struct pcpu_vstats 
*vstats; ++ }; ++ struct garp_port *garp_port; ++ struct mrp_port *mrp_port; ++ struct device dev; ++ const struct attribute_group *sysfs_groups[4]; ++ const struct attribute_group *sysfs_rx_queue_group; ++ const struct rtnl_link_ops *rtnl_link_ops; ++ unsigned int gso_max_size; ++ u16 gso_max_segs; ++ const struct dcbnl_rtnl_ops *dcbnl_ops; ++ s16 num_tc; ++ struct netdev_tc_txq tc_to_txq[16]; ++ u8 prio_tc_map[16]; ++ unsigned int fcoe_ddp_xid; ++ struct netprio_map *priomap; ++ struct phy_device *phydev; ++ struct sfp_bus *sfp_bus; ++ struct lock_class_key *qdisc_tx_busylock; ++ struct lock_class_key *qdisc_running_key; ++ bool proto_down; ++ unsigned int wol_enabled: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long: 64; ++}; ++ ++struct radix_tree_node; ++ ++struct radix_tree_root { ++ spinlock_t xa_lock; ++ gfp_t gfp_mask; ++ struct radix_tree_node *rnode; ++}; ++ ++struct idr { ++ struct radix_tree_root idr_rt; ++ unsigned int idr_base; ++ unsigned int idr_next; ++}; ++ ++struct netns_sysctl_ipv6 { ++ struct ctl_table_header *hdr; ++ struct ctl_table_header *route_hdr; ++ struct ctl_table_header *icmp_hdr; ++ struct ctl_table_header *frags_hdr; ++ struct ctl_table_header *xfrm6_hdr; ++ int bindv6only; ++ int flush_delay; ++ int ip6_rt_max_size; ++ int ip6_rt_gc_min_interval; ++ int ip6_rt_gc_timeout; ++ int ip6_rt_gc_interval; ++ int ip6_rt_gc_elasticity; ++ int ip6_rt_mtu_expires; ++ int ip6_rt_min_advmss; ++ int multipath_hash_policy; ++ int flowlabel_consistency; ++ int auto_flowlabels; ++ int icmpv6_time; ++ int icmpv6_echo_ignore_all; ++ int anycast_src_echo_reply; ++ int ip_nonlocal_bind; ++ int fwmark_reflect; ++ int idgen_retries; ++ int idgen_delay; ++ int flowlabel_state_ranges; ++ int flowlabel_reflect; ++ int max_dst_opts_cnt; ++ int max_hbh_opts_cnt; ++ int max_dst_opts_len; ++ int max_hbh_opts_len; ++ int seg6_flowlabel; ++}; ++ ++struct ipv6_devconf; ++ ++struct fib6_info; ++ ++struct rt6_info; ++ ++struct rt6_statistics; ++ ++struct fib6_table; ++ ++struct seg6_pernet_data; ++ ++struct netns_ipv6 { ++ struct netns_sysctl_ipv6 sysctl; ++ struct ipv6_devconf *devconf_all; ++ struct ipv6_devconf *devconf_dflt; ++ struct inet_peer_base *peers; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_frags frags; ++ struct xt_table *ip6table_filter; ++ struct xt_table *ip6table_mangle; ++ struct xt_table *ip6table_raw; ++ struct xt_table *ip6table_security; ++ struct xt_table *ip6table_nat; ++ struct 
fib6_info *fib6_null_entry; ++ struct rt6_info *ip6_null_entry; ++ struct rt6_statistics *rt6_stats; ++ struct timer_list ip6_fib_timer; ++ struct hlist_head *fib_table_hash; ++ struct fib6_table *fib6_main_tbl; ++ struct list_head fib6_walkers; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dst_ops ip6_dst_ops; ++ rwlock_t fib6_walker_lock; ++ spinlock_t fib6_gc_lock; ++ unsigned int ip6_rt_gc_expire; ++ long unsigned int ip6_rt_last_gc; ++ unsigned int fib6_rules_require_fldissect; ++ bool fib6_has_custom_rules; ++ struct rt6_info *ip6_prohibit_entry; ++ struct rt6_info *ip6_blk_hole_entry; ++ struct fib6_table *fib6_local_tbl; ++ struct fib_rules_ops *fib6_rules_ops; ++ struct sock **icmp_sk; ++ struct sock *ndisc_sk; ++ struct sock *tcp_sk; ++ struct sock *igmp_sk; ++ struct sock *mc_autojoin_sk; ++ struct list_head mr6_tables; ++ struct fib_rules_ops *mr6_rules_ops; ++ atomic_t dev_addr_genid; ++ atomic_t fib6_sernum; ++ struct seg6_pernet_data *seg6_data; ++ struct fib_notifier_ops *notifier_ops; ++ struct fib_notifier_ops *ip6mr_notifier_ops; ++ unsigned int ipmr_seq; ++ struct { ++ struct hlist_head head; ++ spinlock_t lock; ++ u32 seq; ++ } ip6addrlbl_table; ++ long: 64; ++}; ++ ++struct sctp_mib; ++ ++struct netns_sctp { ++ struct sctp_mib *sctp_statistics; ++ struct proc_dir_entry *proc_net_sctp; ++ struct ctl_table_header *sysctl_header; ++ struct sock *ctl_sock; ++ struct list_head local_addr_list; ++ struct list_head addr_waitq; ++ struct timer_list addr_wq_timer; ++ struct list_head auto_asconf_splist; ++ spinlock_t addr_wq_lock; ++ spinlock_t local_addr_lock; ++ unsigned int rto_initial; ++ unsigned int rto_min; ++ unsigned int rto_max; ++ int rto_alpha; ++ int rto_beta; ++ int max_burst; ++ int cookie_preserve_enable; ++ char *sctp_hmac_alg; ++ unsigned int valid_cookie_life; ++ unsigned int sack_timeout; ++ unsigned int hb_interval; ++ int max_retrans_association; ++ int max_retrans_path; ++ int max_retrans_init; ++ int pf_retrans; ++ int pf_enable; ++ int sndbuf_policy; ++ int rcvbuf_policy; ++ int default_auto_asconf; ++ int addip_enable; ++ int addip_noauth; ++ int prsctp_enable; ++ int reconf_enable; ++ int auth_enable; ++ int intl_enable; ++ int scope_policy; ++ int rwnd_upd_shift; ++ long unsigned int max_autoclose; ++}; ++ ++struct nf_queue_handler; ++ ++struct nf_logger; ++ ++struct netns_nf { ++ struct proc_dir_entry *proc_netfilter; ++ const struct nf_queue_handler *queue_handler; ++ const struct nf_logger *nf_loggers[13]; ++ struct ctl_table_header *nf_log_dir_header; ++ struct nf_hook_entries *hooks_ipv4[5]; ++ struct nf_hook_entries *hooks_ipv6[5]; ++ struct nf_hook_entries *hooks_arp[3]; ++ struct nf_hook_entries *hooks_bridge[5]; ++ bool defrag_ipv4; ++ bool defrag_ipv6; ++}; ++ ++struct ebt_table; ++ ++struct netns_xt { ++ struct list_head tables[13]; ++ bool notrack_deprecated_warning; ++ bool clusterip_deprecated_warning; ++ struct ebt_table *broute_table; ++ struct ebt_table *frame_filter; ++ struct ebt_table *frame_nat; ++}; ++ ++struct nf_proto_net { ++ struct ctl_table_header *ctl_table_header; ++ struct ctl_table *ctl_table; ++ unsigned int users; ++}; ++ ++struct nf_generic_net { ++ struct nf_proto_net pn; ++ unsigned int timeout; ++}; ++ ++struct nf_tcp_net { ++ struct nf_proto_net pn; ++ unsigned int timeouts[14]; ++ unsigned int tcp_loose; ++ unsigned int tcp_be_liberal; ++ unsigned int tcp_max_retrans; ++}; ++ ++struct nf_udp_net { ++ struct nf_proto_net pn; ++ unsigned int timeouts[2]; ++}; ++ ++struct nf_icmp_net { ++ struct nf_proto_net 
pn; ++ unsigned int timeout; ++}; ++ ++struct nf_dccp_net { ++ struct nf_proto_net pn; ++ int dccp_loose; ++ unsigned int dccp_timeout[10]; ++}; ++ ++struct nf_sctp_net { ++ struct nf_proto_net pn; ++ unsigned int timeouts[10]; ++}; ++ ++struct nf_ip_net { ++ struct nf_generic_net generic; ++ struct nf_tcp_net tcp; ++ struct nf_udp_net udp; ++ struct nf_icmp_net icmp; ++ struct nf_icmp_net icmpv6; ++ struct nf_dccp_net dccp; ++ struct nf_sctp_net sctp; ++}; ++ ++struct ct_pcpu; ++ ++struct ip_conntrack_stat; ++ ++struct nf_ct_event_notifier; ++ ++struct nf_exp_event_notifier; ++ ++struct netns_ct { ++ atomic_t count; ++ unsigned int expect_count; ++ struct delayed_work ecache_dwork; ++ bool ecache_dwork_pending; ++ struct ctl_table_header *sysctl_header; ++ struct ctl_table_header *acct_sysctl_header; ++ struct ctl_table_header *tstamp_sysctl_header; ++ struct ctl_table_header *event_sysctl_header; ++ struct ctl_table_header *helper_sysctl_header; ++ unsigned int sysctl_log_invalid; ++ int sysctl_events; ++ int sysctl_acct; ++ int sysctl_auto_assign_helper; ++ bool auto_assign_helper_warned; ++ int sysctl_tstamp; ++ int sysctl_checksum; ++ struct ct_pcpu *pcpu_lists; ++ struct ip_conntrack_stat *stat; ++ struct nf_ct_event_notifier *nf_conntrack_event_cb; ++ struct nf_exp_event_notifier *nf_expect_event_cb; ++ struct nf_ip_net nf_ct_proto; ++ unsigned int labels_used; ++}; ++ ++struct netns_nftables { ++ struct list_head tables; ++ struct list_head commit_list; ++ struct mutex commit_mutex; ++ unsigned int base_seq; ++ u8 gencursor; ++ u8 validate_state; ++}; ++ ++struct netns_nf_frag { ++ struct netns_frags frags; ++}; ++ ++struct xfrm_policy_hash { ++ struct hlist_head *table; ++ unsigned int hmask; ++ u8 dbits4; ++ u8 sbits4; ++ u8 dbits6; ++ u8 sbits6; ++}; ++ ++struct xfrm_policy_hthresh { ++ struct work_struct work; ++ seqlock_t lock; ++ u8 lbits4; ++ u8 rbits4; ++ u8 lbits6; ++ u8 rbits6; ++}; ++ ++struct netns_xfrm { ++ struct list_head state_all; ++ struct hlist_head *state_bydst; ++ struct hlist_head *state_bysrc; ++ struct hlist_head *state_byspi; ++ unsigned int state_hmask; ++ unsigned int state_num; ++ struct work_struct state_hash_work; ++ struct list_head policy_all; ++ struct hlist_head *policy_byidx; ++ unsigned int policy_idx_hmask; ++ struct hlist_head policy_inexact[3]; ++ struct xfrm_policy_hash policy_bydst[3]; ++ unsigned int policy_count[6]; ++ struct work_struct policy_hash_work; ++ struct xfrm_policy_hthresh policy_hthresh; ++ struct sock *nlsk; ++ struct sock *nlsk_stash; ++ u32 sysctl_aevent_etime; ++ u32 sysctl_aevent_rseqth; ++ int sysctl_larval_drop; ++ u32 sysctl_acq_expires; ++ struct ctl_table_header *sysctl_hdr; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dst_ops xfrm4_dst_ops; ++ struct dst_ops xfrm6_dst_ops; ++ spinlock_t xfrm_state_lock; ++ spinlock_t xfrm_policy_lock; ++ struct mutex xfrm_cfg_mutex; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct mpls_route; ++ ++struct netns_mpls { ++ int ip_ttl_propagate; ++ int default_ttl; ++ size_t platform_labels; ++ struct mpls_route **platform_label; ++ struct ctl_table_header *ctl; ++}; ++ ++struct can_dev_rcv_lists; ++ ++struct s_stats; ++ ++struct s_pstats; ++ ++struct netns_can { ++ struct proc_dir_entry *proc_dir; ++ struct proc_dir_entry *pde_version; ++ struct proc_dir_entry *pde_stats; ++ struct proc_dir_entry *pde_reset_stats; ++ struct proc_dir_entry *pde_rcvlist_all; ++ struct proc_dir_entry *pde_rcvlist_fil; ++ struct proc_dir_entry *pde_rcvlist_inv; ++ struct proc_dir_entry 
*pde_rcvlist_sff; ++ struct proc_dir_entry *pde_rcvlist_eff; ++ struct proc_dir_entry *pde_rcvlist_err; ++ struct proc_dir_entry *bcmproc_dir; ++ struct can_dev_rcv_lists *can_rx_alldev_list; ++ spinlock_t can_rcvlists_lock; ++ struct timer_list can_stattimer; ++ struct s_stats *can_stats; ++ struct s_pstats *can_pstats; ++ struct hlist_head cgw_list; ++}; ++ ++struct uevent_sock; ++ ++struct net_generic; ++ ++struct netns_ipvs; ++ ++struct net { ++ refcount_t passive; ++ refcount_t count; ++ spinlock_t rules_mod_lock; ++ u32 hash_mix; ++ atomic64_t cookie_gen; ++ struct list_head list; ++ struct list_head exit_list; ++ struct llist_node cleanup_list; ++ struct user_namespace *user_ns; ++ struct ucounts *ucounts; ++ spinlock_t nsid_lock; ++ struct idr netns_ids; ++ struct ns_common ns; ++ struct proc_dir_entry *proc_net; ++ struct proc_dir_entry *proc_net_stat; ++ struct ctl_table_set sysctls; ++ struct sock *rtnl; ++ struct sock *genl_sock; ++ struct uevent_sock *uevent_sock; ++ struct list_head dev_base_head; ++ struct hlist_head *dev_name_head; ++ struct hlist_head *dev_index_head; ++ unsigned int dev_base_seq; ++ int ifindex; ++ unsigned int dev_unreg_count; ++ struct list_head rules_ops; ++ struct list_head fib_notifier_ops; ++ struct net_device *loopback_dev; ++ struct netns_core core; ++ struct netns_mib mib; ++ struct netns_packet packet; ++ struct netns_unix unx; ++ long: 64; ++ long: 64; ++ struct netns_ipv4 ipv4; ++ struct netns_ipv6 ipv6; ++ struct netns_sctp sctp; ++ struct netns_nf nf; ++ struct netns_xt xt; ++ struct netns_ct ct; ++ struct netns_nftables nft; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_nf_frag nf_frag; ++ struct ctl_table_header *nf_frag_frags_hdr; ++ struct sock *nfnl; ++ struct sock *nfnl_stash; ++ struct list_head nfnl_acct_list; ++ struct list_head nfct_timeout_list; ++ struct sk_buff_head wext_nlevents; ++ struct net_generic *gen; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_xfrm xfrm; ++ struct netns_ipvs *ipvs; ++ struct netns_mpls mpls; ++ struct netns_can can; ++ struct sock *diag_nlsk; ++ atomic_t fnhe_genid; ++}; ++ ++struct hh_cache { ++ unsigned int hh_len; ++ seqlock_t hh_lock; ++ long unsigned int hh_data[12]; ++}; ++ ++struct neigh_table; ++ ++struct neigh_parms; ++ ++struct neigh_ops; ++ ++struct neighbour { ++ struct neighbour *next; ++ struct neigh_table *tbl; ++ struct neigh_parms *parms; ++ long unsigned int confirmed; ++ long unsigned int updated; ++ rwlock_t lock; ++ refcount_t refcnt; ++ struct sk_buff_head arp_queue; ++ unsigned int arp_queue_len_bytes; ++ struct timer_list timer; ++ long unsigned int used; ++ atomic_t probes; ++ __u8 flags; ++ __u8 nud_state; ++ __u8 type; ++ __u8 dead; ++ seqlock_t ha_lock; ++ unsigned char ha[32]; ++ struct hh_cache hh; ++ int (*output)(struct neighbour *, struct sk_buff *); ++ const struct neigh_ops *ops; ++ struct callback_head rcu; ++ struct net_device *dev; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ u8 primary_key[0]; ++}; ++ ++struct ipv6_stable_secret { ++ bool initialized; ++ struct in6_addr secret; ++}; ++ ++struct ipv6_devconf { ++ __s32 forwarding; ++ __s32 hop_limit; ++ __s32 mtu6; ++ __s32 accept_ra; ++ __s32 accept_redirects; ++ __s32 autoconf; ++ __s32 dad_transmits; ++ __s32 rtr_solicits; ++ __s32 rtr_solicit_interval; ++ __s32 rtr_solicit_max_interval; ++ __s32 rtr_solicit_delay; ++ __s32 force_mld_version; ++ __s32 mldv1_unsolicited_report_interval; ++ __s32 
mldv2_unsolicited_report_interval; ++ __s32 use_tempaddr; ++ __s32 temp_valid_lft; ++ __s32 temp_prefered_lft; ++ __s32 regen_max_retry; ++ __s32 max_desync_factor; ++ __s32 max_addresses; ++ __s32 accept_ra_defrtr; ++ __s32 accept_ra_min_hop_limit; ++ __s32 accept_ra_pinfo; ++ __s32 ignore_routes_with_linkdown; ++ __s32 accept_ra_rtr_pref; ++ __s32 rtr_probe_interval; ++ __s32 accept_ra_rt_info_min_plen; ++ __s32 accept_ra_rt_info_max_plen; ++ __s32 proxy_ndp; ++ __s32 accept_source_route; ++ __s32 accept_ra_from_local; ++ __s32 optimistic_dad; ++ __s32 use_optimistic; ++ __s32 mc_forwarding; ++ __s32 disable_ipv6; ++ __s32 drop_unicast_in_l2_multicast; ++ __s32 accept_dad; ++ __s32 force_tllao; ++ __s32 ndisc_notify; ++ __s32 suppress_frag_ndisc; ++ __s32 accept_ra_mtu; ++ __s32 drop_unsolicited_na; ++ struct ipv6_stable_secret stable_secret; ++ __s32 use_oif_addrs_only; ++ __s32 keep_addr_on_down; ++ __s32 seg6_enabled; ++ __u32 enhanced_dad; ++ __u32 addr_gen_mode; ++ __s32 disable_policy; ++ __s32 ndisc_tclass; ++ struct ctl_table_header *sysctl_header; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++}; ++ ++struct rt6key { ++ struct in6_addr addr; ++ int plen; ++}; ++ ++struct fib6_nh { ++ struct in6_addr nh_gw; ++ struct net_device *nh_dev; ++ struct lwtunnel_state *nh_lwtstate; ++ unsigned int nh_flags; ++ atomic_t nh_upper_bound; ++ int nh_weight; ++}; ++ ++struct fib6_node; ++ ++struct dst_metrics; ++ ++struct rt6_exception_bucket; ++ ++struct fib6_info { ++ struct fib6_table *fib6_table; ++ struct fib6_info *fib6_next; ++ struct fib6_node *fib6_node; ++ struct list_head fib6_siblings; ++ unsigned int fib6_nsiblings; ++ atomic_t fib6_ref; ++ long unsigned int expires; ++ struct dst_metrics *fib6_metrics; ++ struct rt6key fib6_dst; ++ u32 fib6_flags; ++ struct rt6key fib6_src; ++ struct rt6key fib6_prefsrc; ++ struct rt6_info **rt6i_pcpu; ++ struct rt6_exception_bucket *rt6i_exception_bucket; ++ long unsigned int last_probe; ++ u32 fib6_metric; ++ u8 fib6_protocol; ++ u8 fib6_type; ++ u8 exception_bucket_flushed: 1; ++ u8 should_flush: 1; ++ u8 dst_nocount: 1; ++ u8 dst_nopolicy: 1; ++ u8 dst_host: 1; ++ u8 fib6_destroying: 1; ++ u8 unused: 2; ++ struct fib6_nh fib6_nh; ++ struct callback_head rcu; ++}; ++ ++struct uncached_list; ++ ++struct rt6_info { ++ struct dst_entry dst; ++ struct fib6_info *from; ++ struct rt6key rt6i_dst; ++ struct rt6key rt6i_src; ++ struct in6_addr rt6i_gateway; ++ struct inet6_dev *rt6i_idev; ++ u32 rt6i_flags; ++ struct rt6key rt6i_prefsrc; ++ struct list_head rt6i_uncached; ++ struct uncached_list *rt6i_uncached_list; ++ short unsigned int rt6i_nfheader_len; ++}; ++ ++struct rt6_statistics { ++ __u32 fib_nodes; ++ __u32 fib_route_nodes; ++ __u32 fib_rt_entries; ++ __u32 fib_rt_cache; ++ __u32 fib_discarded_routes; ++ atomic_t fib_rt_alloc; ++ atomic_t fib_rt_uncache; ++}; ++ ++struct fib6_node { ++ struct fib6_node *parent; ++ struct fib6_node *left; ++ struct fib6_node *right; ++ struct fib6_info *leaf; ++ 
__u16 fn_bit; ++ __u16 fn_flags; ++ int fn_sernum; ++ struct fib6_info *rr_ptr; ++ struct callback_head rcu; ++}; ++ ++struct fib6_table { ++ struct hlist_node tb6_hlist; ++ u32 tb6_id; ++ spinlock_t tb6_lock; ++ struct fib6_node tb6_root; ++ struct inet_peer_base tb6_peers; ++ unsigned int flags; ++ unsigned int fib_seq; ++}; ++ ++enum nf_inet_hooks { ++ NF_INET_PRE_ROUTING = 0, ++ NF_INET_LOCAL_IN = 1, ++ NF_INET_FORWARD = 2, ++ NF_INET_LOCAL_OUT = 3, ++ NF_INET_POST_ROUTING = 4, ++ NF_INET_NUMHOOKS = 5, ++}; ++ ++union nf_inet_addr { ++ __u32 all[4]; ++ __be32 ip; ++ __be32 ip6[4]; ++ struct in_addr in; ++ struct in6_addr in6; ++}; ++ ++struct nf_queue_entry; ++ ++struct nf_queue_handler { ++ int (*outfn)(struct nf_queue_entry *, unsigned int); ++ void (*nf_hook_drop)(struct net *); ++}; ++ ++enum nf_log_type { ++ NF_LOG_TYPE_LOG = 0, ++ NF_LOG_TYPE_ULOG = 1, ++ NF_LOG_TYPE_MAX = 2, ++}; ++ ++struct nf_loginfo; ++ ++typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); ++ ++struct nf_logger { ++ char *name; ++ enum nf_log_type type; ++ nf_logfn *logfn; ++ struct module *me; ++}; ++ ++struct nf_hook_state; ++ ++typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); ++ ++struct nf_hook_entry { ++ nf_hookfn *hook; ++ void *priv; ++}; ++ ++struct nf_hook_entries { ++ u16 num_hook_entries; ++ struct nf_hook_entry hooks[0]; ++}; ++ ++struct hlist_nulls_head { ++ struct hlist_nulls_node *first; ++}; ++ ++struct ip_ct_tcp_state { ++ u_int32_t td_end; ++ u_int32_t td_maxend; ++ u_int32_t td_maxwin; ++ u_int32_t td_maxack; ++ u_int8_t td_scale; ++ u_int8_t flags; ++}; ++ ++struct ip_ct_tcp { ++ struct ip_ct_tcp_state seen[2]; ++ u_int8_t state; ++ u_int8_t last_dir; ++ u_int8_t retrans; ++ u_int8_t last_index; ++ u_int32_t last_seq; ++ u_int32_t last_ack; ++ u_int32_t last_end; ++ u_int16_t last_win; ++ u_int8_t last_wscale; ++ u_int8_t last_flags; ++}; ++ ++enum ip_conntrack_info { ++ IP_CT_ESTABLISHED = 0, ++ IP_CT_RELATED = 1, ++ IP_CT_NEW = 2, ++ IP_CT_IS_REPLY = 3, ++ IP_CT_ESTABLISHED_REPLY = 3, ++ IP_CT_RELATED_REPLY = 4, ++ IP_CT_NUMBER = 5, ++ IP_CT_UNTRACKED = 7, ++}; ++ ++struct ip_conntrack_stat { ++ unsigned int found; ++ unsigned int invalid; ++ unsigned int ignore; ++ unsigned int insert; ++ unsigned int insert_failed; ++ unsigned int drop; ++ unsigned int early_drop; ++ unsigned int error; ++ unsigned int expect_new; ++ unsigned int expect_create; ++ unsigned int expect_delete; ++ unsigned int search_restart; ++}; ++ ++enum ip_conntrack_dir { ++ IP_CT_DIR_ORIGINAL = 0, ++ IP_CT_DIR_REPLY = 1, ++ IP_CT_DIR_MAX = 2, ++}; ++ ++union nf_conntrack_man_proto { ++ __be16 all; ++ struct { ++ __be16 port; ++ } tcp; ++ struct { ++ __be16 port; ++ } udp; ++ struct { ++ __be16 id; ++ } icmp; ++ struct { ++ __be16 port; ++ } dccp; ++ struct { ++ __be16 port; ++ } sctp; ++ struct { ++ __be16 key; ++ } gre; ++}; ++ ++struct nf_ct_dccp { ++ u_int8_t role[2]; ++ u_int8_t state; ++ u_int8_t last_pkt; ++ u_int8_t last_dir; ++ u_int64_t handshake_seq; ++}; ++ ++enum sctp_conntrack { ++ SCTP_CONNTRACK_NONE = 0, ++ SCTP_CONNTRACK_CLOSED = 1, ++ SCTP_CONNTRACK_COOKIE_WAIT = 2, ++ SCTP_CONNTRACK_COOKIE_ECHOED = 3, ++ SCTP_CONNTRACK_ESTABLISHED = 4, ++ SCTP_CONNTRACK_SHUTDOWN_SENT = 5, ++ SCTP_CONNTRACK_SHUTDOWN_RECD = 6, ++ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, ++ SCTP_CONNTRACK_HEARTBEAT_SENT = 8, ++ SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, ++ 
SCTP_CONNTRACK_MAX = 10, ++}; ++ ++struct ip_ct_sctp { ++ enum sctp_conntrack state; ++ __be32 vtag[2]; ++}; ++ ++struct ct_pcpu { ++ spinlock_t lock; ++ struct hlist_nulls_head unconfirmed; ++ struct hlist_nulls_head dying; ++}; ++ ++struct proc_ns_operations { ++ const char *name; ++ const char *real_ns_name; ++ int type; ++ struct ns_common * (*get)(struct task_struct *); ++ void (*put)(struct ns_common *); ++ int (*install)(struct nsproxy *, struct ns_common *); ++ struct user_namespace * (*owner)(struct ns_common *); ++ struct ns_common * (*get_parent)(struct ns_common *); ++}; ++ ++struct radix_tree_node { ++ unsigned char shift; ++ unsigned char offset; ++ unsigned char count; ++ unsigned char exceptional; ++ struct radix_tree_node *parent; ++ struct radix_tree_root *root; ++ union { ++ struct list_head private_list; ++ struct callback_head callback_head; ++ }; ++ void *slots[64]; ++ long unsigned int tags[3]; ++}; ++ ++struct ida_bitmap { ++ long unsigned int bitmap[16]; ++}; ++ ++struct rnd_state { ++ __u32 s1; ++ __u32 s2; ++ __u32 s3; ++ __u32 s4; ++}; ++ ++struct hlist_bl_node; ++ ++struct hlist_bl_head { ++ struct hlist_bl_node *first; ++}; ++ ++struct hlist_bl_node { ++ struct hlist_bl_node *next; ++ struct hlist_bl_node **pprev; ++}; ++ ++struct lockref { ++ union { ++ __u64 lock_count; ++ struct { ++ spinlock_t lock; ++ int count; ++ }; ++ }; ++}; ++ ++struct qstr { ++ union { ++ struct { ++ u32 hash; ++ u32 len; ++ }; ++ u64 hash_len; ++ }; ++ const unsigned char *name; ++}; ++ ++struct dentry_stat_t { ++ long int nr_dentry; ++ long int nr_unused; ++ long int age_limit; ++ long int want_pages; ++ long int dummy[2]; ++}; ++ ++struct dentry_operations; ++ ++struct super_block; ++ ++struct dentry { ++ unsigned int d_flags; ++ seqcount_t d_seq; ++ struct hlist_bl_node d_hash; ++ struct dentry *d_parent; ++ struct qstr d_name; ++ struct inode *d_inode; ++ unsigned char d_iname[32]; ++ struct lockref d_lockref; ++ const struct dentry_operations *d_op; ++ struct super_block *d_sb; ++ long unsigned int d_time; ++ void *d_fsdata; ++ union { ++ struct list_head d_lru; ++ wait_queue_head_t *d_wait; ++ }; ++ struct list_head d_child; ++ struct list_head d_subdirs; ++ union { ++ struct hlist_node d_alias; ++ struct hlist_bl_node d_in_lookup_hash; ++ struct callback_head d_rcu; ++ } d_u; ++ atomic_t d_neg_dnum; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++typedef u32 errseq_t; ++ ++struct address_space_operations; ++ ++struct address_space { ++ struct inode *host; ++ struct radix_tree_root i_pages; ++ atomic_t i_mmap_writable; ++ struct rb_root_cached i_mmap; ++ struct rw_semaphore i_mmap_rwsem; ++ long unsigned int nrpages; ++ long unsigned int nrexceptional; ++ long unsigned int writeback_index; ++ const struct address_space_operations *a_ops; ++ long unsigned int flags; ++ spinlock_t private_lock; ++ gfp_t gfp_mask; ++ struct list_head private_list; ++ void *private_data; ++ errseq_t wb_err; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct posix_acl; ++ ++struct inode_operations; ++ ++struct bdi_writeback; ++ ++struct file_lock_context; ++ ++struct block_device; ++ ++struct cdev; ++ ++struct fsnotify_mark_connector; ++ ++struct inode { ++ umode_t i_mode; ++ short unsigned int i_opflags; ++ kuid_t i_uid; ++ kgid_t i_gid; ++ unsigned int i_flags; ++ struct posix_acl *i_acl; ++ struct posix_acl *i_default_acl; ++ const struct 
inode_operations *i_op; ++ struct super_block *i_sb; ++ struct address_space *i_mapping; ++ void *i_security; ++ long unsigned int i_ino; ++ union { ++ const unsigned int i_nlink; ++ unsigned int __i_nlink; ++ }; ++ dev_t i_rdev; ++ loff_t i_size; ++ struct timespec64 i_atime; ++ struct timespec64 i_mtime; ++ struct timespec64 i_ctime; ++ spinlock_t i_lock; ++ short unsigned int i_bytes; ++ u8 i_blkbits; ++ u8 i_write_hint; ++ blkcnt_t i_blocks; ++ long unsigned int i_state; ++ struct rw_semaphore i_rwsem; ++ long unsigned int dirtied_when; ++ long unsigned int dirtied_time_when; ++ struct hlist_node i_hash; ++ struct list_head i_io_list; ++ struct bdi_writeback *i_wb; ++ int i_wb_frn_winner; ++ u16 i_wb_frn_avg_time; ++ u16 i_wb_frn_history; ++ struct list_head i_lru; ++ struct list_head i_sb_list; ++ struct list_head i_wb_list; ++ union { ++ struct hlist_head i_dentry; ++ struct callback_head i_rcu; ++ }; ++ atomic64_t i_version; ++ atomic_t i_count; ++ atomic_t i_dio_count; ++ atomic_t i_writecount; ++ const struct file_operations *i_fop; ++ struct file_lock_context *i_flctx; ++ struct address_space i_data; ++ struct list_head i_devices; ++ union { ++ struct pipe_inode_info *i_pipe; ++ struct block_device *i_bdev; ++ struct cdev *i_cdev; ++ char *i_link; ++ unsigned int i_dir_seq; ++ }; ++ __u32 i_generation; ++ __u32 i_fsnotify_mask; ++ struct fsnotify_mark_connector *i_fsnotify_marks; ++ void *i_private; ++ atomic64_t i_sequence; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct vfsmount; ++ ++struct path; ++ ++struct dentry_operations { ++ int (*d_revalidate)(struct dentry *, unsigned int); ++ int (*d_weak_revalidate)(struct dentry *, unsigned int); ++ int (*d_hash)(const struct dentry *, struct qstr *); ++ int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); ++ int (*d_delete)(const struct dentry *); ++ int (*d_init)(struct dentry *); ++ void (*d_release)(struct dentry *); ++ void (*d_prune)(struct dentry *); ++ void (*d_iput)(struct dentry *, struct inode *); ++ char * (*d_dname)(struct dentry *, char *, int); ++ struct vfsmount * (*d_automount)(struct path *); ++ int (*d_manage)(const struct path *, bool); ++ struct dentry * (*d_real)(struct dentry *, const struct inode *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct mtd_info; ++ ++typedef long long int qsize_t; ++ ++struct quota_format_type; ++ ++struct mem_dqinfo { ++ struct quota_format_type *dqi_format; ++ int dqi_fmt_id; ++ struct list_head dqi_dirty_list; ++ long unsigned int dqi_flags; ++ unsigned int dqi_bgrace; ++ unsigned int dqi_igrace; ++ qsize_t dqi_max_spc_limit; ++ qsize_t dqi_max_ino_limit; ++ void *dqi_priv; ++}; ++ ++struct quota_format_ops; ++ ++struct quota_info { ++ unsigned int flags; ++ struct rw_semaphore dqio_sem; ++ struct inode *files[3]; ++ struct mem_dqinfo info[3]; ++ const struct quota_format_ops *ops[3]; ++}; ++ ++enum rcu_sync_type { ++ RCU_SYNC = 0, ++ RCU_SCHED_SYNC = 1, ++ RCU_BH_SYNC = 2, ++}; ++ ++struct rcu_sync { ++ int gp_state; ++ int gp_count; ++ wait_queue_head_t gp_wait; ++ int cb_state; ++ struct callback_head cb_head; ++ enum rcu_sync_type gp_type; ++}; ++ ++struct rcuwait { ++ struct task_struct *task; ++}; ++ ++struct percpu_rw_semaphore { ++ struct rcu_sync rss; ++ unsigned int *read_count; ++ struct rw_semaphore rw_sem; ++ struct 
rcuwait writer; ++ int readers_block; ++}; ++ ++struct sb_writers { ++ int frozen; ++ wait_queue_head_t wait_unfrozen; ++ struct percpu_rw_semaphore rw_sem[3]; ++}; ++ ++typedef struct { ++ __u8 b[16]; ++} uuid_t; ++ ++struct shrink_control; ++ ++struct shrinker { ++ long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); ++ long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); ++ long int batch; ++ int seeks; ++ unsigned int flags; ++ struct list_head list; ++ int id; ++ atomic_long_t *nr_deferred; ++}; ++ ++struct list_lru_node; ++ ++struct list_lru { ++ struct list_lru_node *node; ++ struct list_head list; ++ int shrinker_id; ++ bool memcg_aware; ++}; ++ ++struct file_system_type; ++ ++struct super_operations; ++ ++struct dquot_operations; ++ ++struct quotactl_ops; ++ ++struct export_operations; ++ ++struct xattr_handler; ++ ++struct super_block { ++ struct list_head s_list; ++ dev_t s_dev; ++ unsigned char s_blocksize_bits; ++ long unsigned int s_blocksize; ++ loff_t s_maxbytes; ++ struct file_system_type *s_type; ++ const struct super_operations *s_op; ++ const struct dquot_operations *dq_op; ++ const struct quotactl_ops *s_qcop; ++ const struct export_operations *s_export_op; ++ long unsigned int s_flags; ++ long unsigned int s_iflags; ++ long unsigned int s_magic; ++ struct dentry *s_root; ++ struct rw_semaphore s_umount; ++ int s_count; ++ atomic_t s_active; ++ void *s_security; ++ const struct xattr_handler **s_xattr; ++ struct hlist_bl_head s_roots; ++ struct list_head s_mounts; ++ struct block_device *s_bdev; ++ struct backing_dev_info *s_bdi; ++ struct mtd_info *s_mtd; ++ struct hlist_node s_instances; ++ unsigned int s_quota_types; ++ struct quota_info s_dquot; ++ struct sb_writers s_writers; ++ char s_id[32]; ++ uuid_t s_uuid; ++ void *s_fs_info; ++ unsigned int s_max_links; ++ fmode_t s_mode; ++ u32 s_time_gran; ++ struct mutex s_vfs_rename_mutex; ++ char *s_subtype; ++ const struct dentry_operations *s_d_op; ++ int cleancache_poolid; ++ struct shrinker s_shrink; ++ atomic_long_t s_remove_count; ++ atomic_long_t s_fsnotify_inode_refs; ++ int s_readonly_remount; ++ struct workqueue_struct *s_dio_done_wq; ++ struct hlist_head s_pins; ++ struct user_namespace *s_user_ns; ++ struct list_lru s_dentry_lru; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct list_lru s_inode_lru; ++ struct callback_head rcu; ++ struct work_struct destroy_work; ++ struct mutex s_sync_lock; ++ int s_stack_depth; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t s_inode_list_lock; ++ struct list_head s_inodes; ++ spinlock_t s_inode_wblist_lock; ++ struct list_head s_inodes_wb; ++ long: 64; ++ long: 64; ++}; ++ ++struct path { ++ struct vfsmount *mnt; ++ struct dentry *dentry; ++}; ++ ++enum pid_type { ++ PIDTYPE_PID = 0, ++ PIDTYPE_TGID = 1, ++ PIDTYPE_PGID = 2, ++ PIDTYPE_SID = 3, ++ PIDTYPE_MAX = 4, ++}; ++ ++struct pid_namespace; ++ ++struct upid { ++ int nr; ++ struct pid_namespace *ns; ++}; ++ ++struct fs_pin; ++ ++struct pid_namespace { ++ struct kref kref; ++ struct idr idr; ++ struct callback_head rcu; ++ unsigned int pid_allocated; ++ struct task_struct *child_reaper; ++ struct kmem_cache *pid_cachep; ++ unsigned int level; ++ struct pid_namespace *parent; ++ struct vfsmount *proc_mnt; ++ struct dentry *proc_self; ++ struct dentry *proc_thread_self; ++ struct fs_pin *bacct; ++ struct user_namespace *user_ns; ++ struct ucounts *ucounts; ++ struct work_struct proc_work; ++ kgid_t pid_gid; ++ int 
hide_pid; ++ int pid_max; ++ int reboot; ++ struct ns_common ns; ++}; ++ ++struct pid { ++ atomic_t count; ++ unsigned int level; ++ struct hlist_head tasks[4]; ++ struct callback_head rcu; ++ struct upid numbers[1]; ++}; ++ ++struct rlimit { ++ __kernel_ulong_t rlim_cur; ++ __kernel_ulong_t rlim_max; ++}; ++ ++typedef void __signalfn_t(int); ++ ++typedef __signalfn_t *__sighandler_t; ++ ++typedef void __restorefn_t(); ++ ++typedef __restorefn_t *__sigrestore_t; ++ ++union sigval { ++ int sival_int; ++ void *sival_ptr; ++}; ++ ++typedef union sigval sigval_t; ++ ++struct siginfo { ++ int si_signo; ++ int si_errno; ++ int si_code; ++ union { ++ int _pad[28]; ++ struct { ++ __kernel_pid_t _pid; ++ __kernel_uid32_t _uid; ++ } _kill; ++ struct { ++ __kernel_timer_t _tid; ++ int _overrun; ++ sigval_t _sigval; ++ int _sys_private; ++ } _timer; ++ struct { ++ __kernel_pid_t _pid; ++ __kernel_uid32_t _uid; ++ sigval_t _sigval; ++ } _rt; ++ struct { ++ __kernel_pid_t _pid; ++ __kernel_uid32_t _uid; ++ int _status; ++ __kernel_clock_t _utime; ++ __kernel_clock_t _stime; ++ } _sigchld; ++ struct { ++ void *_addr; ++ union { ++ short int _addr_lsb; ++ struct { ++ char _dummy_bnd[8]; ++ void *_lower; ++ void *_upper; ++ } _addr_bnd; ++ struct { ++ char _dummy_pkey[8]; ++ __u32 _pkey; ++ } _addr_pkey; ++ }; ++ } _sigfault; ++ struct { ++ long int _band; ++ int _fd; ++ } _sigpoll; ++ struct { ++ void *_call_addr; ++ int _syscall; ++ unsigned int _arch; ++ } _sigsys; ++ } _sifields; ++}; ++ ++struct ratelimit_state { ++ raw_spinlock_t lock; ++ int interval; ++ int burst; ++ int printed; ++ int missed; ++ long unsigned int begin; ++ long unsigned int flags; ++}; ++ ++struct user_struct { ++ refcount_t __count; ++ atomic_t processes; ++ atomic_t sigpending; ++ atomic_t fanotify_listeners; ++ atomic_long_t epoll_watches; ++ long unsigned int mq_bytes; ++ long unsigned int locked_shm; ++ long unsigned int unix_inflight; ++ atomic_long_t pipe_bufs; ++ struct key *uid_keyring; ++ struct key *session_keyring; ++ struct hlist_node uidhash_node; ++ kuid_t uid; ++ atomic_long_t locked_vm; ++ struct ratelimit_state ratelimit; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct sigaction { ++ __sighandler_t sa_handler; ++ long unsigned int sa_flags; ++ __sigrestore_t sa_restorer; ++ sigset_t sa_mask; ++}; ++ ++struct k_sigaction { ++ struct sigaction sa; ++}; ++ ++struct userfaultfd_ctx; ++ ++struct vm_userfaultfd_ctx { ++ struct userfaultfd_ctx *ctx; ++}; ++ ++struct anon_vma; ++ ++struct vm_operations_struct; ++ ++struct vm_area_struct { ++ long unsigned int vm_start; ++ long unsigned int vm_end; ++ struct vm_area_struct *vm_next; ++ struct vm_area_struct *vm_prev; ++ struct rb_node vm_rb; ++ long unsigned int rb_subtree_gap; ++ struct mm_struct *vm_mm; ++ pgprot_t vm_page_prot; ++ long unsigned int vm_flags; ++ struct { ++ struct rb_node rb; ++ long unsigned int rb_subtree_last; ++ } shared; ++ struct list_head anon_vma_chain; ++ struct anon_vma *anon_vma; ++ const struct vm_operations_struct *vm_ops; ++ long unsigned int vm_pgoff; ++ struct file *vm_file; ++ void *vm_private_data; ++ atomic_long_t swap_readahead_info; ++ struct mempolicy *vm_policy; ++ struct vm_userfaultfd_ctx vm_userfaultfd_ctx; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct mm_rss_stat { ++ atomic_long_t count[4]; ++}; ++ ++struct rseq { ++ __u32 cpu_id_start; ++ __u32 cpu_id; ++ union { 
++ __u64 ptr64; ++ __u64 ptr; ++ } rseq_cs; ++ __u32 flags; ++ long: 32; ++ long: 64; ++}; ++ ++struct rq; ++ ++struct rq_flags; ++ ++struct sched_class { ++ const struct sched_class *next; ++ void (*enqueue_task)(struct rq *, struct task_struct *, int); ++ void (*dequeue_task)(struct rq *, struct task_struct *, int); ++ void (*yield_task)(struct rq *); ++ bool (*yield_to_task)(struct rq *, struct task_struct *, bool); ++ void (*check_preempt_curr)(struct rq *, struct task_struct *, int); ++ struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *, struct rq_flags *); ++ void (*put_prev_task)(struct rq *, struct task_struct *); ++ int (*select_task_rq)(struct task_struct *, int, int, int); ++ void (*migrate_task_rq)(struct task_struct *, int); ++ void (*task_woken)(struct rq *, struct task_struct *); ++ void (*set_cpus_allowed)(struct task_struct *, const struct cpumask *); ++ void (*rq_online)(struct rq *); ++ void (*rq_offline)(struct rq *); ++ void (*set_curr_task)(struct rq *); ++ void (*task_tick)(struct rq *, struct task_struct *, int); ++ void (*task_fork)(struct task_struct *); ++ void (*task_dead)(struct task_struct *); ++ void (*switched_from)(struct rq *, struct task_struct *); ++ void (*switched_to)(struct rq *, struct task_struct *); ++ void (*prio_changed)(struct rq *, struct task_struct *, int); ++ unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); ++ void (*update_curr)(struct rq *); ++ void (*task_change_group)(struct task_struct *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++typedef struct { ++ atomic64_t id; ++ long unsigned int pinned; ++ void *vdso; ++ long unsigned int refcount; ++ long unsigned int flags; ++} mm_context_t; ++ ++struct xol_area; ++ ++struct uprobes_state { ++ struct xol_area *xol_area; ++}; ++ ++struct linux_binfmt; ++ ++struct core_state; ++ ++struct kioctx_table; ++ ++struct mmu_notifier_mm; ++ ++struct mm_struct { ++ struct { ++ struct vm_area_struct *mmap; ++ struct rb_root mm_rb; ++ u64 vmacache_seqnum; ++ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ long unsigned int mmap_base; ++ long unsigned int mmap_legacy_base; ++ long unsigned int task_size; ++ long unsigned int highest_vm_end; ++ pgd_t *pgd; ++ atomic_t membarrier_state; ++ atomic_t mm_users; ++ atomic_t mm_count; ++ atomic_long_t pgtables_bytes; ++ int map_count; ++ spinlock_t page_table_lock; ++ struct rw_semaphore mmap_sem; ++ struct list_head mmlist; ++ long unsigned int hiwater_rss; ++ long unsigned int hiwater_vm; ++ long unsigned int total_vm; ++ atomic_long_t locked_vm; ++ long unsigned int pinned_vm; ++ long unsigned int data_vm; ++ long unsigned int exec_vm; ++ long unsigned int stack_vm; ++ long unsigned int def_flags; ++ spinlock_t arg_lock; ++ long unsigned int start_code; ++ long unsigned int end_code; ++ long unsigned int start_data; ++ long unsigned int end_data; ++ long unsigned int start_brk; ++ long unsigned int brk; ++ long unsigned int start_stack; ++ long unsigned int arg_start; ++ long unsigned int arg_end; ++ long unsigned int env_start; ++ long unsigned int env_end; ++ long unsigned int saved_auxv[46]; ++ struct mm_rss_stat rss_stat; ++ struct linux_binfmt *binfmt; ++ mm_context_t context; ++ long unsigned int flags; ++ struct core_state *core_state; ++ spinlock_t ioctx_lock; ++ struct kioctx_table *ioctx_table; ++ struct task_struct *owner; ++ struct user_namespace *user_ns; ++ struct file 
*exe_file; ++ struct mmu_notifier_mm *mmu_notifier_mm; ++ pgtable_t pmd_huge_pte; ++ long unsigned int numa_next_scan; ++ long unsigned int numa_scan_offset; ++ int numa_scan_seq; ++ atomic_t tlb_flush_pending; ++ struct uprobes_state uprobes_state; ++ atomic_long_t hugetlb_usage; ++ struct work_struct async_put_work; ++ }; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int cpu_bitmap[0]; ++}; ++ ++struct kernel_cap_struct { ++ __u32 cap[2]; ++}; ++ ++typedef struct kernel_cap_struct kernel_cap_t; ++ ++struct group_info; ++ ++struct cred { ++ atomic_t usage; ++ kuid_t uid; ++ kgid_t gid; ++ kuid_t suid; ++ kgid_t sgid; ++ kuid_t euid; ++ kgid_t egid; ++ kuid_t fsuid; ++ kgid_t fsgid; ++ unsigned int securebits; ++ kernel_cap_t cap_inheritable; ++ kernel_cap_t cap_permitted; ++ kernel_cap_t cap_effective; ++ kernel_cap_t cap_bset; ++ kernel_cap_t cap_ambient; ++ unsigned char jit_keyring; ++ struct key *session_keyring; ++ struct key *process_keyring; ++ struct key *thread_keyring; ++ struct key *request_key_auth; ++ void *security; ++ struct user_struct *user; ++ struct user_namespace *user_ns; ++ struct group_info *group_info; ++ union { ++ int non_rcu; ++ struct callback_head rcu; ++ }; ++}; ++ ++struct uts_namespace; ++ ++struct ipc_namespace; ++ ++struct mnt_namespace; ++ ++struct cgroup_namespace; ++ ++struct nsproxy { ++ atomic_t count; ++ struct uts_namespace *uts_ns; ++ struct ipc_namespace *ipc_ns; ++ struct mnt_namespace *mnt_ns; ++ struct pid_namespace *pid_ns_for_children; ++ struct net *net_ns; ++ struct cgroup_namespace *cgroup_ns; ++}; ++ ++struct cpu_itimer { ++ u64 expires; ++ u64 incr; ++}; ++ ++struct task_cputime_atomic { ++ atomic64_t utime; ++ atomic64_t stime; ++ atomic64_t sum_exec_runtime; ++}; ++ ++struct thread_group_cputimer { ++ struct task_cputime_atomic cputime_atomic; ++ bool running; ++ bool checking_timer; ++}; ++ ++struct pacct_struct { ++ int ac_flag; ++ long int ac_exitcode; ++ long unsigned int ac_mem; ++ u64 ac_utime; ++ u64 ac_stime; ++ long unsigned int ac_minflt; ++ long unsigned int ac_majflt; ++}; ++ ++struct tty_struct; ++ ++struct autogroup; ++ ++struct taskstats; ++ ++struct tty_audit_buf; ++ ++struct signal_struct { ++ atomic_t sigcnt; ++ atomic_t live; ++ int nr_threads; ++ struct list_head thread_head; ++ wait_queue_head_t wait_chldexit; ++ struct task_struct *curr_target; ++ struct sigpending shared_pending; ++ struct hlist_head multiprocess; ++ int group_exit_code; ++ int notify_count; ++ struct task_struct *group_exit_task; ++ int group_stop_count; ++ unsigned int flags; ++ unsigned int is_child_subreaper: 1; ++ unsigned int has_child_subreaper: 1; ++ int posix_timer_id; ++ struct list_head posix_timers; ++ struct hrtimer real_timer; ++ ktime_t it_real_incr; ++ struct cpu_itimer it[2]; ++ struct thread_group_cputimer cputimer; ++ struct task_cputime cputime_expires; ++ struct list_head cpu_timers[3]; ++ struct pid *pids[4]; ++ atomic_t tick_dep_mask; ++ struct pid *tty_old_pgrp; ++ int leader; ++ struct tty_struct *tty; ++ struct autogroup *autogroup; ++ seqlock_t stats_lock; ++ u64 utime; ++ u64 stime; ++ u64 cutime; ++ u64 cstime; ++ u64 gtime; ++ u64 cgtime; ++ struct prev_cputime prev_cputime; ++ long unsigned int nvcsw; ++ long unsigned int nivcsw; ++ long unsigned int 
cnvcsw; ++ long unsigned int cnivcsw; ++ long unsigned int min_flt; ++ long unsigned int maj_flt; ++ long unsigned int cmin_flt; ++ long unsigned int cmaj_flt; ++ long unsigned int inblock; ++ long unsigned int oublock; ++ long unsigned int cinblock; ++ long unsigned int coublock; ++ long unsigned int maxrss; ++ long unsigned int cmaxrss; ++ struct task_io_accounting ioac; ++ long long unsigned int sum_sched_runtime; ++ struct rlimit rlim[16]; ++ struct pacct_struct pacct; ++ struct taskstats *stats; ++ unsigned int audit_tty; ++ struct tty_audit_buf *tty_audit_buf; ++ bool oom_flag_origin; ++ short int oom_score_adj; ++ short int oom_score_adj_min; ++ struct mm_struct *oom_mm; ++ struct mutex cred_guard_mutex; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct sighand_struct { ++ atomic_t count; ++ struct k_sigaction action[64]; ++ spinlock_t siglock; ++ wait_queue_head_t signalfd_wqh; ++}; ++ ++struct bio; ++ ++struct bio_list { ++ struct bio *head; ++ struct bio *tail; ++}; ++ ++typedef int congested_fn(void *, int); ++ ++struct fprop_local_percpu { ++ struct percpu_counter events; ++ unsigned int period; ++ raw_spinlock_t lock; ++}; ++ ++enum wb_reason { ++ WB_REASON_BACKGROUND = 0, ++ WB_REASON_VMSCAN = 1, ++ WB_REASON_SYNC = 2, ++ WB_REASON_PERIODIC = 3, ++ WB_REASON_LAPTOP_TIMER = 4, ++ WB_REASON_FREE_MORE_MEM = 5, ++ WB_REASON_FS_FREE_SPACE = 6, ++ WB_REASON_FORKER_THREAD = 7, ++ WB_REASON_MAX = 8, ++}; ++ ++struct percpu_ref; ++ ++typedef void percpu_ref_func_t(struct percpu_ref *); ++ ++struct percpu_ref { ++ atomic_long_t count; ++ long unsigned int percpu_count_ptr; ++ percpu_ref_func_t *release; ++ percpu_ref_func_t *confirm_switch; ++ bool force_atomic: 1; ++ struct callback_head rcu; ++}; ++ ++struct bdi_writeback_congested; ++ ++struct cgroup_subsys_state; ++ ++struct bdi_writeback { ++ struct backing_dev_info *bdi; ++ long unsigned int state; ++ long unsigned int last_old_flush; ++ struct list_head b_dirty; ++ struct list_head b_io; ++ struct list_head b_more_io; ++ struct list_head b_dirty_time; ++ spinlock_t list_lock; ++ struct percpu_counter stat[4]; ++ struct bdi_writeback_congested *congested; ++ long unsigned int bw_time_stamp; ++ long unsigned int dirtied_stamp; ++ long unsigned int written_stamp; ++ long unsigned int write_bandwidth; ++ long unsigned int avg_write_bandwidth; ++ long unsigned int dirty_ratelimit; ++ long unsigned int balanced_dirty_ratelimit; ++ struct fprop_local_percpu completions; ++ int dirty_exceeded; ++ enum wb_reason start_all_reason; ++ spinlock_t work_lock; ++ struct list_head work_list; ++ struct delayed_work dwork; ++ long unsigned int dirty_sleep; ++ struct list_head bdi_node; ++ struct percpu_ref refcnt; ++ struct fprop_local_percpu memcg_completions; ++ struct cgroup_subsys_state *memcg_css; ++ struct cgroup_subsys_state *blkcg_css; ++ struct list_head memcg_node; ++ struct list_head blkcg_node; ++ union { ++ struct work_struct release_work; ++ struct callback_head rcu; ++ }; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct rcu_device; ++ ++struct backing_dev_info { ++ struct list_head bdi_list; ++ long unsigned int ra_pages; ++ long unsigned int io_pages; ++ congested_fn *congested_fn; ++ void *congested_data; ++ const char *name; ++ struct kref refcnt; ++ unsigned int capabilities; ++ unsigned int 
min_ratio; ++ unsigned int max_ratio; ++ unsigned int max_prop_frac; ++ atomic_long_t tot_write_bandwidth; ++ struct bdi_writeback wb; ++ struct list_head wb_list; ++ struct radix_tree_root cgwb_tree; ++ struct rb_root cgwb_congested_tree; ++ struct mutex cgwb_release_mutex; ++ struct rw_semaphore wb_switch_rwsem; ++ wait_queue_head_t wb_waitq; ++ union { ++ struct rcu_device *rcu_dev; ++ struct device *dev; ++ }; ++ struct device *owner; ++ struct timer_list laptop_mode_wb_timer; ++ struct dentry *debug_dir; ++ struct dentry *debug_stats; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct io_cq; ++ ++struct io_context { ++ atomic_long_t refcount; ++ atomic_t active_ref; ++ atomic_t nr_tasks; ++ spinlock_t lock; ++ short unsigned int ioprio; ++ int nr_batch_requests; ++ long unsigned int last_waited; ++ struct radix_tree_root icq_tree; ++ struct io_cq *icq_hint; ++ struct hlist_head icq_list; ++ struct work_struct release_work; ++}; ++ ++struct cgroup; ++ ++struct css_set { ++ struct cgroup_subsys_state *subsys[14]; ++ refcount_t refcount; ++ struct css_set *dom_cset; ++ struct cgroup *dfl_cgrp; ++ int nr_tasks; ++ struct list_head tasks; ++ struct list_head mg_tasks; ++ struct list_head dying_tasks; ++ struct list_head task_iters; ++ struct list_head e_cset_node[14]; ++ struct list_head threaded_csets; ++ struct list_head threaded_csets_node; ++ struct hlist_node hlist; ++ struct list_head cgrp_links; ++ struct list_head mg_preload_node; ++ struct list_head mg_node; ++ struct cgroup *mg_src_cgrp; ++ struct cgroup *mg_dst_cgrp; ++ struct css_set *mg_dst_cset; ++ bool dead; ++ struct callback_head callback_head; ++}; ++ ++typedef u32 compat_uptr_t; ++ ++struct compat_robust_list { ++ compat_uptr_t next; ++}; ++ ++typedef s32 compat_long_t; ++ ++struct compat_robust_list_head { ++ struct compat_robust_list list; ++ compat_long_t futex_offset; ++ compat_uptr_t list_op_pending; ++}; ++ ++struct cgroup_subsys; ++ ++struct cgroup_subsys_state { ++ struct cgroup *cgroup; ++ struct cgroup_subsys *ss; ++ struct percpu_ref refcnt; ++ struct list_head sibling; ++ struct list_head children; ++ struct list_head rstat_css_node; ++ int id; ++ unsigned int flags; ++ u64 serial_nr; ++ atomic_t online_cnt; ++ struct work_struct destroy_work; ++ struct rcu_work destroy_rwork; ++ struct cgroup_subsys_state *parent; ++}; ++ ++struct mem_cgroup_id { ++ int id; ++ atomic_t ref; ++}; ++ ++struct page_counter { ++ atomic_long_t usage; ++ long unsigned int min; ++ long unsigned int low; ++ long unsigned int max; ++ struct page_counter *parent; ++ long unsigned int emin; ++ atomic_long_t min_usage; ++ atomic_long_t children_min_usage; ++ long unsigned int elow; ++ atomic_long_t low_usage; ++ atomic_long_t children_low_usage; ++ long unsigned int watermark; ++ long unsigned int failcnt; ++}; ++ ++struct vmpressure { ++ long unsigned int scanned; ++ long unsigned int reclaimed; ++ long unsigned int tree_scanned; ++ long unsigned int tree_reclaimed; ++ struct spinlock sr_lock; ++ struct list_head events; ++ struct mutex events_lock; ++ struct work_struct work; ++}; ++ ++struct cgroup_file { ++ struct kernfs_node *kn; ++ long unsigned int notified_at; ++ struct timer_list notify_timer; ++}; ++ ++struct mem_cgroup_threshold_ary; ++ ++struct mem_cgroup_thresholds { ++ struct mem_cgroup_threshold_ary *primary; ++ struct mem_cgroup_threshold_ary *spare; ++}; ++ ++struct memcg_padding { ++ char x[0]; ++}; ++ 
++enum memcg_kmem_state { ++ KMEM_NONE = 0, ++ KMEM_ALLOCATED = 1, ++ KMEM_ONLINE = 2, ++}; ++ ++struct fprop_global { ++ struct percpu_counter events; ++ unsigned int period; ++ seqcount_t sequence; ++}; ++ ++struct wb_domain { ++ spinlock_t lock; ++ struct fprop_global completions; ++ struct timer_list period_timer; ++ long unsigned int period_time; ++ long unsigned int dirty_limit_tstamp; ++ long unsigned int dirty_limit; ++}; ++ ++struct mem_cgroup_stat_cpu; ++ ++struct mem_cgroup_per_node; ++ ++struct mem_cgroup { ++ struct cgroup_subsys_state css; ++ struct mem_cgroup_id id; ++ struct page_counter memory; ++ struct page_counter swap; ++ struct page_counter memsw; ++ struct page_counter kmem; ++ struct page_counter tcpmem; ++ long unsigned int high; ++ struct work_struct high_work; ++ long unsigned int soft_limit; ++ struct vmpressure vmpressure; ++ bool use_hierarchy; ++ bool oom_group; ++ bool oom_lock; ++ int under_oom; ++ int swappiness; ++ int oom_kill_disable; ++ struct cgroup_file events_file; ++ struct cgroup_file swap_events_file; ++ struct mutex thresholds_lock; ++ struct mem_cgroup_thresholds thresholds; ++ struct mem_cgroup_thresholds memsw_thresholds; ++ struct list_head oom_notify; ++ long unsigned int move_charge_at_immigrate; ++ spinlock_t move_lock; ++ long unsigned int move_lock_flags; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct memcg_padding _pad1_; ++ atomic_t moving_account; ++ struct task_struct *move_lock_task; ++ struct mem_cgroup_stat_cpu *stat_cpu; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct memcg_padding _pad2_; ++ atomic_long_t stat[34]; ++ atomic_long_t events[81]; ++ atomic_long_t memory_events[7]; ++ long unsigned int socket_pressure; ++ bool tcpmem_active; ++ int tcpmem_pressure; ++ int kmemcg_id; ++ enum memcg_kmem_state kmem_state; ++ struct list_head kmem_caches; ++ int last_scanned_node; ++ nodemask_t scan_nodes; ++ atomic_t numainfo_events; ++ atomic_t numainfo_updating; ++ struct list_head cgwb_list; ++ struct wb_domain cgwb_domain; ++ struct list_head event_list; ++ spinlock_t event_list_lock; ++ struct mem_cgroup_per_node *nodeinfo[0]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++enum uprobe_task_state { ++ UTASK_RUNNING = 0, ++ UTASK_SSTEP = 1, ++ UTASK_SSTEP_ACK = 2, ++ UTASK_SSTEP_TRAPPED = 3, ++}; ++ ++struct arch_uprobe_task {}; ++ ++struct uprobe; ++ ++struct return_instance; ++ ++struct uprobe_task { ++ enum uprobe_task_state state; ++ union { ++ struct { ++ struct arch_uprobe_task autask; ++ long unsigned int vaddr; ++ }; ++ struct { ++ struct callback_head dup_xol_work; ++ long unsigned int dup_xol_addr; ++ }; ++ }; ++ struct uprobe *active_uprobe; ++ long unsigned int xol_vaddr; ++ struct return_instance *return_instances; ++ unsigned int depth; ++}; ++ ++struct vm_struct { ++ struct vm_struct *next; ++ void *addr; ++ long unsigned int size; ++ long unsigned int flags; ++ struct page **pages; ++ unsigned int nr_pages; ++ phys_addr_t phys_addr; ++ const void *caller; ++}; ++ ++union thread_union { ++ struct task_struct task; ++ long unsigned int stack[8192]; ++}; ++ ++struct kstat { ++ u32 result_mask; ++ umode_t mode; ++ unsigned int nlink; ++ uint32_t blksize; ++ u64 attributes; ++ u64 attributes_mask; ++ u64 ino; ++ dev_t dev; ++ dev_t rdev; ++ kuid_t uid; ++ kgid_t gid; ++ loff_t size; ++ struct timespec64 atime; ++ struct timespec64 mtime; ++ struct timespec64 ctime; ++ struct timespec64 btime; ++ u64 blocks; ++}; 
++ ++struct shrink_control { ++ gfp_t gfp_mask; ++ int nid; ++ long unsigned int nr_to_scan; ++ long unsigned int nr_scanned; ++ struct mem_cgroup *memcg; ++}; ++ ++struct list_lru_one { ++ struct list_head list; ++ long int nr_items; ++}; ++ ++struct list_lru_memcg { ++ struct callback_head rcu; ++ struct list_lru_one *lru[0]; ++}; ++ ++struct list_lru_node { ++ spinlock_t lock; ++ struct list_lru_one lru; ++ struct list_lru_memcg *memcg_lrus; ++ long int nr_items; ++ long: 64; ++ long: 64; ++}; ++ ++struct return_instance { ++ struct uprobe *uprobe; ++ long unsigned int func; ++ long unsigned int stack; ++ long unsigned int orig_ret_vaddr; ++ bool chained; ++ struct return_instance *next; ++}; ++ ++struct res_mem { ++ phys_addr_t base; ++ phys_addr_t size; ++}; ++ ++typedef void (*bp_hardening_cb_t)(); ++ ++struct bp_hardening_data { ++ int hyp_vectors_slot; ++ bp_hardening_cb_t fn; ++}; ++ ++typedef int vm_fault_t; ++ ++typedef int (*dev_page_fault_t)(struct vm_area_struct *, long unsigned int, const struct page *, unsigned int, pmd_t *); ++ ++typedef void (*dev_page_free_t)(struct page *, void *); ++ ++struct vmem_altmap { ++ const long unsigned int base_pfn; ++ const long unsigned int reserve; ++ long unsigned int free; ++ long unsigned int align; ++ long unsigned int alloc; ++}; ++ ++struct resource { ++ resource_size_t start; ++ resource_size_t end; ++ const char *name; ++ long unsigned int flags; ++ long unsigned int desc; ++ struct resource *parent; ++ struct resource *sibling; ++ struct resource *child; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++enum memory_type { ++ MEMORY_DEVICE_PRIVATE = 1, ++ MEMORY_DEVICE_PUBLIC = 2, ++ MEMORY_DEVICE_FS_DAX = 3, ++}; ++ ++struct dev_pagemap { ++ dev_page_fault_t page_fault; ++ dev_page_free_t page_free; ++ struct vmem_altmap altmap; ++ bool altmap_valid; ++ struct resource res; ++ struct percpu_ref *ref; ++ void (*kill)(struct percpu_ref *); ++ struct device *dev; ++ void *data; ++ enum memory_type type; ++}; ++ ++enum rw_hint { ++ WRITE_LIFE_NOT_SET = 0, ++ WRITE_LIFE_NONE = 1, ++ WRITE_LIFE_SHORT = 2, ++ WRITE_LIFE_MEDIUM = 3, ++ WRITE_LIFE_LONG = 4, ++ WRITE_LIFE_EXTREME = 5, ++}; ++ ++struct fown_struct { ++ rwlock_t lock; ++ struct pid *pid; ++ enum pid_type pid_type; ++ kuid_t uid; ++ kuid_t euid; ++ int signum; ++}; ++ ++struct file_ra_state { ++ long unsigned int start; ++ unsigned int size; ++ unsigned int async_size; ++ unsigned int ra_pages; ++ unsigned int mmap_miss; ++ loff_t prev_pos; ++}; ++ ++struct file { ++ union { ++ struct llist_node fu_llist; ++ struct callback_head fu_rcuhead; ++ } f_u; ++ struct path f_path; ++ struct inode *f_inode; ++ const struct file_operations *f_op; ++ spinlock_t f_lock; ++ enum rw_hint f_write_hint; ++ atomic_long_t f_count; ++ unsigned int f_flags; ++ fmode_t f_mode; ++ struct mutex f_pos_lock; ++ loff_t f_pos; ++ struct fown_struct f_owner; ++ const struct cred *f_cred; ++ struct file_ra_state f_ra; ++ u64 f_version; ++ void *f_security; ++ void *private_data; ++ struct list_head f_ep_links; ++ struct list_head f_tfile_llink; ++ struct address_space *f_mapping; ++ errseq_t f_wb_err; ++}; ++ ++enum page_entry_size { ++ PE_SIZE_PTE = 0, ++ PE_SIZE_PMD = 1, ++ PE_SIZE_PUD = 2, ++}; ++ ++struct vm_fault; ++ ++struct vm_operations_struct { ++ void (*open)(struct vm_area_struct *); ++ void (*close)(struct vm_area_struct *); ++ int (*split)(struct vm_area_struct *, long unsigned int); ++ 
int (*mremap)(struct vm_area_struct *); ++ vm_fault_t (*fault)(struct vm_fault *); ++ vm_fault_t (*huge_fault)(struct vm_fault *, enum page_entry_size); ++ void (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); ++ long unsigned int (*pagesize)(struct vm_area_struct *); ++ vm_fault_t (*page_mkwrite)(struct vm_fault *); ++ vm_fault_t (*pfn_mkwrite)(struct vm_fault *); ++ int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); ++ const char * (*name)(struct vm_area_struct *); ++ int (*set_policy)(struct vm_area_struct *, struct mempolicy *); ++ struct mempolicy * (*get_policy)(struct vm_area_struct *, long unsigned int); ++ struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct core_thread { ++ struct task_struct *task; ++ struct core_thread *next; ++}; ++ ++struct core_state { ++ atomic_t nr_threads; ++ struct core_thread dumper; ++ struct completion startup; ++}; ++ ++struct vm_fault { ++ struct vm_area_struct *vma; ++ unsigned int flags; ++ gfp_t gfp_mask; ++ long unsigned int pgoff; ++ long unsigned int address; ++ pmd_t *pmd; ++ pud_t *pud; ++ pte_t orig_pte; ++ struct page *cow_page; ++ struct mem_cgroup *memcg; ++ struct page *page; ++ pte_t *pte; ++ spinlock_t *ptl; ++ pgtable_t prealloc_pte; ++}; ++ ++struct fiemap_extent { ++ __u64 fe_logical; ++ __u64 fe_physical; ++ __u64 fe_length; ++ __u64 fe_reserved64[2]; ++ __u32 fe_flags; ++ __u32 fe_reserved[3]; ++}; ++ ++enum migrate_mode { ++ MIGRATE_ASYNC = 0, ++ MIGRATE_SYNC_LIGHT = 1, ++ MIGRATE_SYNC = 2, ++ MIGRATE_SYNC_NO_COPY = 3, ++}; ++ ++struct delayed_call { ++ void (*fn)(void *); ++ void *arg; ++}; ++ ++typedef struct { ++ __u8 b[16]; ++} guid_t; ++ ++struct io_cq { ++ struct request_queue *q; ++ struct io_context *ioc; ++ union { ++ struct list_head q_node; ++ struct kmem_cache *__rcu_icq_cache; ++ }; ++ union { ++ struct hlist_node ioc_node; ++ struct callback_head __rcu_head; ++ }; ++ unsigned int flags; ++}; ++ ++struct files_stat_struct { ++ long unsigned int nr_files; ++ long unsigned int nr_free_files; ++ long unsigned int max_files; ++}; ++ ++struct inodes_stat_t { ++ long int nr_inodes; ++ long int nr_unused; ++ long int dummy[5]; ++}; ++ ++struct iattr { ++ unsigned int ia_valid; ++ umode_t ia_mode; ++ kuid_t ia_uid; ++ kgid_t ia_gid; ++ loff_t ia_size; ++ struct timespec64 ia_atime; ++ struct timespec64 ia_mtime; ++ struct timespec64 ia_ctime; ++ struct file *ia_file; ++}; ++ ++typedef __kernel_uid32_t projid_t; ++ ++typedef struct { ++ projid_t val; ++} kprojid_t; ++ ++enum quota_type { ++ USRQUOTA = 0, ++ GRPQUOTA = 1, ++ PRJQUOTA = 2, ++}; ++ ++struct kqid { ++ union { ++ kuid_t uid; ++ kgid_t gid; ++ kprojid_t projid; ++ }; ++ enum quota_type type; ++}; ++ ++struct mem_dqblk { ++ qsize_t dqb_bhardlimit; ++ qsize_t dqb_bsoftlimit; ++ qsize_t dqb_curspace; ++ qsize_t dqb_rsvspace; ++ qsize_t dqb_ihardlimit; ++ qsize_t dqb_isoftlimit; ++ qsize_t dqb_curinodes; ++ time64_t dqb_btime; ++ time64_t dqb_itime; ++}; ++ ++struct dquot { ++ struct hlist_node dq_hash; ++ struct list_head dq_inuse; ++ struct list_head dq_free; ++ struct list_head dq_dirty; ++ struct mutex dq_lock; ++ spinlock_t dq_dqb_lock; ++ atomic_t dq_count; ++ struct super_block *dq_sb; ++ struct kqid dq_id; ++ loff_t dq_off; ++ long unsigned int dq_flags; ++ struct mem_dqblk dq_dqb; ++}; ++ ++struct quota_format_type { ++ int 
qf_fmt_id; ++ const struct quota_format_ops *qf_ops; ++ struct module *qf_owner; ++ struct quota_format_type *qf_next; ++}; ++ ++struct dqstats { ++ long unsigned int stat[8]; ++ struct percpu_counter counter[8]; ++}; ++ ++struct quota_format_ops { ++ int (*check_quota_file)(struct super_block *, int); ++ int (*read_file_info)(struct super_block *, int); ++ int (*write_file_info)(struct super_block *, int); ++ int (*free_file_info)(struct super_block *, int); ++ int (*read_dqblk)(struct dquot *); ++ int (*commit_dqblk)(struct dquot *); ++ int (*release_dqblk)(struct dquot *); ++ int (*get_next_id)(struct super_block *, struct kqid *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct dquot_operations { ++ int (*write_dquot)(struct dquot *); ++ struct dquot * (*alloc_dquot)(struct super_block *, int); ++ void (*destroy_dquot)(struct dquot *); ++ int (*acquire_dquot)(struct dquot *); ++ int (*release_dquot)(struct dquot *); ++ int (*mark_dirty)(struct dquot *); ++ int (*write_info)(struct super_block *, int); ++ qsize_t * (*get_reserved_space)(struct inode *); ++ int (*get_projid)(struct inode *, kprojid_t *); ++ int (*get_inode_usage)(struct inode *, qsize_t *); ++ int (*get_next_id)(struct super_block *, struct kqid *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct qc_dqblk { ++ int d_fieldmask; ++ u64 d_spc_hardlimit; ++ u64 d_spc_softlimit; ++ u64 d_ino_hardlimit; ++ u64 d_ino_softlimit; ++ u64 d_space; ++ u64 d_ino_count; ++ s64 d_ino_timer; ++ s64 d_spc_timer; ++ int d_ino_warns; ++ int d_spc_warns; ++ u64 d_rt_spc_hardlimit; ++ u64 d_rt_spc_softlimit; ++ u64 d_rt_space; ++ s64 d_rt_spc_timer; ++ int d_rt_spc_warns; ++}; ++ ++struct qc_type_state { ++ unsigned int flags; ++ unsigned int spc_timelimit; ++ unsigned int ino_timelimit; ++ unsigned int rt_spc_timelimit; ++ unsigned int spc_warnlimit; ++ unsigned int ino_warnlimit; ++ unsigned int rt_spc_warnlimit; ++ long long unsigned int ino; ++ blkcnt_t blocks; ++ blkcnt_t nextents; ++}; ++ ++struct qc_state { ++ unsigned int s_incoredqs; ++ struct qc_type_state s_state[3]; ++}; ++ ++struct qc_info { ++ int i_fieldmask; ++ unsigned int i_flags; ++ unsigned int i_spc_timelimit; ++ unsigned int i_ino_timelimit; ++ unsigned int i_rt_spc_timelimit; ++ unsigned int i_spc_warnlimit; ++ unsigned int i_ino_warnlimit; ++ unsigned int i_rt_spc_warnlimit; ++}; ++ ++struct quotactl_ops { ++ int (*quota_on)(struct super_block *, int, int, const struct path *); ++ int (*quota_off)(struct super_block *, int); ++ int (*quota_enable)(struct super_block *, unsigned int); ++ int (*quota_disable)(struct super_block *, unsigned int); ++ int (*quota_sync)(struct super_block *, int); ++ int (*set_info)(struct super_block *, int, struct qc_info *); ++ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); ++ int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); ++ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); ++ int (*get_state)(struct super_block *, struct qc_state *); ++ int (*rm_xquota)(struct super_block *, unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++typedef struct { ++ size_t written; ++ size_t count; ++ union { ++ char *buf; ++ void *data; ++ } arg; ++ int error; ++} read_descriptor_t; ++ ++struct writeback_control; ++ ++struct swap_info_struct; ++ ++struct 
address_space_operations { ++ int (*writepage)(struct page *, struct writeback_control *); ++ int (*readpage)(struct file *, struct page *); ++ int (*writepages)(struct address_space *, struct writeback_control *); ++ int (*set_page_dirty)(struct page *); ++ int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); ++ int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page **, void **); ++ int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *); ++ sector_t (*bmap)(struct address_space *, sector_t); ++ void (*invalidatepage)(struct page *, unsigned int, unsigned int); ++ int (*releasepage)(struct page *, gfp_t); ++ void (*freepage)(struct page *); ++ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); ++ int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode); ++ bool (*isolate_page)(struct page *, isolate_mode_t); ++ void (*putback_page)(struct page *); ++ int (*launder_page)(struct page *); ++ int (*is_partially_uptodate)(struct page *, long unsigned int, long unsigned int); ++ void (*is_dirty_writeback)(struct page *, bool *, bool *); ++ int (*error_remove_page)(struct address_space *, struct page *); ++ int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); ++ void (*swap_deactivate)(struct file *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++enum writeback_sync_modes { ++ WB_SYNC_NONE = 0, ++ WB_SYNC_ALL = 1, ++}; ++ ++struct writeback_control { ++ long int nr_to_write; ++ long int pages_skipped; ++ loff_t range_start; ++ loff_t range_end; ++ enum writeback_sync_modes sync_mode; ++ unsigned int for_kupdate: 1; ++ unsigned int for_background: 1; ++ unsigned int tagged_writepages: 1; ++ unsigned int for_reclaim: 1; ++ unsigned int range_cyclic: 1; ++ unsigned int for_sync: 1; ++ struct bdi_writeback *wb; ++ struct inode *inode; ++ int wb_id; ++ int wb_lcand_id; ++ int wb_tcand_id; ++ size_t wb_bytes; ++ size_t wb_lcand_bytes; ++ size_t wb_tcand_bytes; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct hd_struct; ++ ++struct gendisk; ++ ++struct block_device { ++ dev_t bd_dev; ++ int bd_openers; ++ int bd_write_openers; ++ struct inode *bd_inode; ++ struct super_block *bd_super; ++ struct mutex bd_mutex; ++ void *bd_claiming; ++ void *bd_holder; ++ int bd_holders; ++ bool bd_write_holder; ++ struct list_head bd_holder_disks; ++ struct block_device *bd_contains; ++ unsigned int bd_block_size; ++ u8 bd_partno; ++ struct hd_struct *bd_part; ++ unsigned int bd_part_count; ++ int bd_invalidated; ++ struct gendisk *bd_disk; ++ struct request_queue *bd_queue; ++ struct backing_dev_info *bd_bdi; ++ struct list_head bd_list; ++ long unsigned int bd_private; ++ int bd_fsfreeze_count; ++ struct mutex bd_fsfreeze_mutex; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct fiemap_extent_info; ++ ++struct inode_operations { ++ struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); ++ const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); ++ int (*permission)(struct inode *, int); ++ struct posix_acl * (*get_acl)(struct inode *, int); ++ int (*readlink)(struct dentry *, char *, int); ++ int 
(*create)(struct inode *, struct dentry *, umode_t, bool); ++ int (*link)(struct dentry *, struct inode *, struct dentry *); ++ int (*unlink)(struct inode *, struct dentry *); ++ int (*symlink)(struct inode *, struct dentry *, const char *); ++ int (*mkdir)(struct inode *, struct dentry *, umode_t); ++ int (*rmdir)(struct inode *, struct dentry *); ++ int (*mknod)(struct inode *, struct dentry *, umode_t, dev_t); ++ int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); ++ int (*setattr)(struct dentry *, struct iattr *); ++ int (*getattr)(const struct path *, struct kstat *, u32, unsigned int); ++ ssize_t (*listxattr)(struct dentry *, char *, size_t); ++ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); ++ int (*update_time)(struct inode *, struct timespec64 *, int); ++ int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); ++ int (*tmpfile)(struct inode *, struct dentry *, umode_t); ++ int (*set_acl)(struct inode *, struct posix_acl *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct file_lock_context { ++ spinlock_t flc_lock; ++ struct list_head flc_flock; ++ struct list_head flc_posix; ++ struct list_head flc_lease; ++}; ++ ++struct file_lock_operations { ++ void (*fl_copy_lock)(struct file_lock *, struct file_lock *); ++ void (*fl_release_private)(struct file_lock *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct nlm_lockowner; ++ ++struct nfs_lock_info { ++ u32 state; ++ struct nlm_lockowner *owner; ++ struct list_head list; ++}; ++ ++struct nfs4_lock_state; ++ ++struct nfs4_lock_info { ++ struct nfs4_lock_state *owner; ++}; ++ ++struct lock_manager_operations; ++ ++struct file_lock { ++ struct file_lock *fl_next; ++ struct list_head fl_list; ++ struct hlist_node fl_link; ++ struct list_head fl_block; ++ fl_owner_t fl_owner; ++ unsigned int fl_flags; ++ unsigned char fl_type; ++ unsigned int fl_pid; ++ int fl_link_cpu; ++ wait_queue_head_t fl_wait; ++ struct file *fl_file; ++ loff_t fl_start; ++ loff_t fl_end; ++ struct fasync_struct *fl_fasync; ++ long unsigned int fl_break_time; ++ long unsigned int fl_downgrade_time; ++ const struct file_lock_operations *fl_ops; ++ const struct lock_manager_operations *fl_lmops; ++ union { ++ struct nfs_lock_info nfs_fl; ++ struct nfs4_lock_info nfs4_fl; ++ struct { ++ struct list_head link; ++ int state; ++ } afs; ++ } fl_u; ++}; ++ ++struct lock_manager_operations { ++ int (*lm_compare_owner)(struct file_lock *, struct file_lock *); ++ long unsigned int (*lm_owner_key)(struct file_lock *); ++ fl_owner_t (*lm_get_owner)(fl_owner_t); ++ void (*lm_put_owner)(fl_owner_t); ++ void (*lm_notify)(struct file_lock *); ++ int (*lm_grant)(struct file_lock *, int); ++ bool (*lm_break)(struct file_lock *); ++ int (*lm_change)(struct file_lock *, int, struct list_head *); ++ void (*lm_setup)(struct file_lock *, void **); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct fasync_struct { ++ rwlock_t fa_lock; ++ int magic; ++ int fa_fd; ++ struct fasync_struct *fa_next; ++ struct file *fa_file; ++ struct callback_head fa_rcu; ++}; ++ ++struct file_system_type { ++ const char *name; ++ int fs_flags; ++ struct dentry * (*mount)(struct file_system_type *, int, const char *, void 
*); ++ void (*kill_sb)(struct super_block *); ++ struct module *owner; ++ struct file_system_type *next; ++ struct hlist_head fs_supers; ++ struct lock_class_key s_lock_key; ++ struct lock_class_key s_umount_key; ++ struct lock_class_key s_vfs_rename_key; ++ struct lock_class_key s_writers_key[3]; ++ struct lock_class_key i_lock_key; ++ struct lock_class_key i_mutex_key; ++ struct lock_class_key i_mutex_dir_key; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kstatfs; ++ ++struct super_operations { ++ struct inode * (*alloc_inode)(struct super_block *); ++ void (*destroy_inode)(struct inode *); ++ void (*dirty_inode)(struct inode *, int); ++ int (*write_inode)(struct inode *, struct writeback_control *); ++ int (*drop_inode)(struct inode *); ++ void (*evict_inode)(struct inode *); ++ void (*put_super)(struct super_block *); ++ int (*sync_fs)(struct super_block *, int); ++ int (*freeze_super)(struct super_block *); ++ int (*freeze_fs)(struct super_block *); ++ int (*thaw_super)(struct super_block *); ++ int (*unfreeze_fs)(struct super_block *); ++ int (*statfs)(struct dentry *, struct kstatfs *); ++ int (*remount_fs)(struct super_block *, int *, char *); ++ void (*umount_begin)(struct super_block *); ++ int (*show_options)(struct seq_file *, struct dentry *); ++ int (*show_devname)(struct seq_file *, struct dentry *); ++ int (*show_path)(struct seq_file *, struct dentry *); ++ int (*show_stats)(struct seq_file *, struct dentry *); ++ ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ++ ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); ++ struct dquot ** (*get_dquots)(struct inode *); ++ int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t); ++ long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); ++ long int (*free_cached_objects)(struct super_block *, struct shrink_control *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct iomap; ++ ++struct inode___2; ++ ++struct dentry___2; ++ ++struct super_block___2; ++ ++struct fid; ++ ++struct iattr___2; ++ ++struct export_operations { ++ int (*encode_fh)(struct inode___2 *, __u32 *, int *, struct inode___2 *); ++ struct dentry___2 * (*fh_to_dentry)(struct super_block___2 *, struct fid *, int, int); ++ struct dentry___2 * (*fh_to_parent)(struct super_block___2 *, struct fid *, int, int); ++ int (*get_name)(struct dentry___2 *, char *, struct dentry___2 *); ++ struct dentry___2 * (*get_parent)(struct dentry___2 *); ++ int (*commit_metadata)(struct inode___2 *); ++ int (*get_uuid)(struct super_block___2 *, u8 *, u32 *, u64 *); ++ int (*map_blocks)(struct inode___2 *, loff_t, u64, struct iomap *, bool, u32 *); ++ int (*commit_blocks)(struct inode___2 *, struct iomap *, int, struct iattr___2 *); ++}; ++ ++struct xattr_handler { ++ const char *name; ++ const char *prefix; ++ int flags; ++ bool (*list)(struct dentry *); ++ int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); ++ int (*set)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, const void *, size_t, int); ++}; ++ ++struct fiemap_extent_info { ++ unsigned int fi_flags; ++ unsigned int fi_extents_mapped; ++ unsigned int fi_extents_max; ++ struct fiemap_extent *fi_extents_start; ++}; ++ 
++typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); ++ ++struct dir_context { ++ filldir_t actor; ++ loff_t pos; ++}; ++ ++typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); ++ ++struct poll_table_struct { ++ poll_queue_proc _qproc; ++ __poll_t _key; ++}; ++ ++struct seq_operations; ++ ++struct seq_file { ++ char *buf; ++ size_t size; ++ size_t from; ++ size_t count; ++ size_t pad_until; ++ loff_t index; ++ loff_t read_pos; ++ u64 version; ++ struct mutex lock; ++ const struct seq_operations *op; ++ int poll_event; ++ const struct file *file; ++ void *private; ++}; ++ ++typedef u8 blk_status_t; ++ ++struct bvec_iter { ++ sector_t bi_sector; ++ unsigned int bi_size; ++ unsigned int bi_idx; ++ unsigned int bi_done; ++ unsigned int bi_bvec_done; ++}; ++ ++typedef void bio_end_io_t(struct bio *); ++ ++struct bio_issue { ++ u64 value; ++}; ++ ++struct blkcg_gq; ++ ++struct bio_integrity_payload; ++ ++struct bio_set; ++ ++struct bio { ++ struct bio *bi_next; ++ struct gendisk *bi_disk; ++ unsigned int bi_opf; ++ short unsigned int bi_flags; ++ short unsigned int bi_ioprio; ++ short unsigned int bi_write_hint; ++ blk_status_t bi_status; ++ u8 bi_partno; ++ unsigned int bi_phys_segments; ++ unsigned int bi_seg_front_size; ++ unsigned int bi_seg_back_size; ++ struct bvec_iter bi_iter; ++ atomic_t __bi_remaining; ++ bio_end_io_t *bi_end_io; ++ void *bi_private; ++ struct io_context *bi_ioc; ++ struct cgroup_subsys_state *bi_css; ++ struct blkcg_gq *bi_blkg; ++ struct bio_issue bi_issue; ++ union { ++ struct bio_integrity_payload *bi_integrity; ++ }; ++ short unsigned int bi_vcnt; ++ short unsigned int bi_max_vecs; ++ atomic_t __bi_cnt; ++ struct bio_vec *bi_io_vec; ++ struct bio_set *bi_pool; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ struct bio_vec bi_inline_vecs[0]; ++}; ++ ++typedef enum { ++ SS_FREE = 0, ++ SS_UNCONNECTED = 1, ++ SS_CONNECTING = 2, ++ SS_CONNECTED = 3, ++ SS_DISCONNECTING = 4, ++} socket_state; ++ ++struct socket_wq { ++ wait_queue_head_t wait; ++ struct fasync_struct *fasync_list; ++ long unsigned int flags; ++ struct callback_head rcu; ++ long: 64; ++}; ++ ++struct proto_ops; ++ ++struct socket { ++ socket_state state; ++ short int type; ++ long unsigned int flags; ++ struct socket_wq *wq; ++ struct file *file; ++ struct sock *sk; ++ const struct proto_ops *ops; ++}; ++ ++typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); ++ ++struct proto_ops { ++ int family; ++ struct module *owner; ++ int (*release)(struct socket *); ++ int (*bind)(struct socket *, struct sockaddr *, int); ++ int (*connect)(struct socket *, struct sockaddr *, int, int); ++ int (*socketpair)(struct socket *, struct socket *); ++ int (*accept)(struct socket *, struct socket *, int, bool); ++ int (*getname)(struct socket *, struct sockaddr *, int); ++ __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); ++ int (*ioctl)(struct socket *, unsigned int, long unsigned int); ++ int (*compat_ioctl)(struct socket *, unsigned int, long unsigned int); ++ int (*listen)(struct socket *, int); ++ int (*shutdown)(struct socket *, int); ++ int (*setsockopt)(struct socket *, int, int, char *, unsigned int); ++ int (*getsockopt)(struct socket *, int, int, char *, int *); ++ int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); ++ int (*compat_getsockopt)(struct socket *, int, int, 
char *, int *); ++ int (*sendmsg)(struct socket *, struct msghdr *, size_t); ++ int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); ++ int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ++ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); ++ ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); ++ int (*set_peek_off)(struct sock *, int); ++ int (*peek_len)(struct socket *); ++ int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); ++ int (*sendpage_locked)(struct sock *, struct page *, int, size_t, int); ++ int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); ++ int (*set_rcvlowat)(struct sock *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct exception_table_entry { ++ int insn; ++ int fixup; ++}; ++ ++struct kernfs_root; ++ ++struct kernfs_elem_dir { ++ long unsigned int subdirs; ++ struct rb_root children; ++ struct kernfs_root *root; ++}; ++ ++struct kernfs_syscall_ops; ++ ++struct kernfs_root { ++ struct kernfs_node *kn; ++ unsigned int flags; ++ struct idr ino_idr; ++ u32 last_ino; ++ u32 next_generation; ++ struct kernfs_syscall_ops *syscall_ops; ++ struct list_head supers; ++ wait_queue_head_t deactivate_waitq; ++}; ++ ++struct kernfs_elem_symlink { ++ struct kernfs_node *target_kn; ++}; ++ ++struct kernfs_ops; ++ ++struct kernfs_open_node; ++ ++struct kernfs_elem_attr { ++ const struct kernfs_ops *ops; ++ struct kernfs_open_node *open; ++ loff_t size; ++ struct kernfs_node *notify_next; ++}; ++ ++union kernfs_node_id { ++ struct { ++ u32 ino; ++ u32 generation; ++ }; ++ u64 id; ++}; ++ ++struct kernfs_iattrs; ++ ++struct kernfs_node { ++ atomic_t count; ++ atomic_t active; ++ struct kernfs_node *parent; ++ const char *name; ++ struct rb_node rb; ++ const void *ns; ++ unsigned int hash; ++ union { ++ struct kernfs_elem_dir dir; ++ struct kernfs_elem_symlink symlink; ++ struct kernfs_elem_attr attr; ++ }; ++ void *priv; ++ union kernfs_node_id id; ++ short unsigned int flags; ++ umode_t mode; ++ struct kernfs_iattrs *iattr; ++}; ++ ++struct kernfs_open_file; ++ ++struct kernfs_ops { ++ int (*open)(struct kernfs_open_file *); ++ void (*release)(struct kernfs_open_file *); ++ int (*seq_show)(struct seq_file *, void *); ++ void * (*seq_start)(struct seq_file *, loff_t *); ++ void * (*seq_next)(struct seq_file *, void *, loff_t *); ++ void (*seq_stop)(struct seq_file *, void *); ++ ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); ++ size_t atomic_write_len; ++ bool prealloc; ++ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); ++ int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct kernfs_syscall_ops { ++ int (*remount_fs)(struct kernfs_root *, int *, char *); ++ int (*show_options)(struct seq_file *, struct kernfs_root *); ++ int (*mkdir)(struct kernfs_node *, const char *, umode_t); ++ int (*rmdir)(struct kernfs_node *); ++ int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); ++ int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long 
unsigned int kabi_reserved4; ++}; ++ ++struct kernfs_open_file { ++ struct kernfs_node *kn; ++ struct file *file; ++ struct seq_file *seq_file; ++ void *priv; ++ struct mutex mutex; ++ struct mutex prealloc_mutex; ++ int event; ++ struct list_head list; ++ char *prealloc_buf; ++ size_t atomic_write_len; ++ bool mmapped: 1; ++ bool released: 1; ++ const struct vm_operations_struct *vm_ops; ++}; ++ ++enum kobj_ns_type { ++ KOBJ_NS_TYPE_NONE = 0, ++ KOBJ_NS_TYPE_NET = 1, ++ KOBJ_NS_TYPES = 2, ++}; ++ ++struct kobj_ns_type_operations { ++ enum kobj_ns_type type; ++ bool (*current_may_mount)(); ++ void * (*grab_current_ns)(); ++ const void * (*netlink_ns)(struct sock *); ++ const void * (*initial_ns)(); ++ void (*drop_ns)(void *); ++}; ++ ++struct attribute { ++ const char *name; ++ umode_t mode; ++}; ++ ++struct bin_attribute; ++ ++struct attribute_group { ++ const char *name; ++ umode_t (*is_visible)(struct kobject *, struct attribute *, int); ++ umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); ++ struct attribute **attrs; ++ struct bin_attribute **bin_attrs; ++}; ++ ++struct bin_attribute { ++ struct attribute attr; ++ size_t size; ++ void *private; ++ ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ++ ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ++ int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); ++}; ++ ++struct sysfs_ops { ++ ssize_t (*show)(struct kobject *, struct attribute *, char *); ++ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); ++}; ++ ++struct kset_uevent_ops; ++ ++struct kset { ++ struct list_head list; ++ spinlock_t list_lock; ++ struct kobject kobj; ++ const struct kset_uevent_ops *uevent_ops; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kobj_type { ++ void (*release)(struct kobject *); ++ const struct sysfs_ops *sysfs_ops; ++ struct attribute **default_attrs; ++ const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); ++ const void * (*namespace)(struct kobject *); ++ void (*get_ownership)(struct kobject *, kuid_t *, kgid_t *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kobj_uevent_env { ++ char *argv[3]; ++ char *envp[32]; ++ int envp_idx; ++ char buf[2048]; ++ int buflen; ++}; ++ ++struct kset_uevent_ops { ++ int (* const filter)(struct kset *, struct kobject *); ++ const char * (* const name)(struct kset *, struct kobject *); ++ int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); ++}; ++ ++struct kobj_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); ++ ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); ++}; ++ ++struct dev_pm_ops { ++ int (*prepare)(struct device *); ++ void (*complete)(struct device *); ++ int (*suspend)(struct device *); ++ int (*resume)(struct device *); ++ int (*freeze)(struct device *); ++ int (*thaw)(struct device *); ++ int (*poweroff)(struct device *); ++ int (*restore)(struct device *); ++ int (*suspend_late)(struct device *); ++ int (*resume_early)(struct device *); ++ int (*freeze_late)(struct device *); ++ int (*thaw_early)(struct device *); ++ int 
(*poweroff_late)(struct device *); ++ int (*restore_early)(struct device *); ++ int (*suspend_noirq)(struct device *); ++ int (*resume_noirq)(struct device *); ++ int (*freeze_noirq)(struct device *); ++ int (*thaw_noirq)(struct device *); ++ int (*poweroff_noirq)(struct device *); ++ int (*restore_noirq)(struct device *); ++ int (*runtime_suspend)(struct device *); ++ int (*runtime_resume)(struct device *); ++ int (*runtime_idle)(struct device *); ++}; ++ ++struct pm_domain_data; ++ ++struct pm_subsys_data { ++ spinlock_t lock; ++ unsigned int refcount; ++ struct list_head clock_list; ++ struct pm_domain_data *domain_data; ++}; ++ ++struct wakeup_source { ++ const char *name; ++ struct list_head entry; ++ spinlock_t lock; ++ struct wake_irq *wakeirq; ++ struct timer_list timer; ++ long unsigned int timer_expires; ++ ktime_t total_time; ++ ktime_t max_time; ++ ktime_t last_time; ++ ktime_t start_prevent_time; ++ ktime_t prevent_sleep_time; ++ long unsigned int event_count; ++ long unsigned int active_count; ++ long unsigned int relax_count; ++ long unsigned int expire_count; ++ long unsigned int wakeup_count; ++ bool active: 1; ++ bool autosleep_enabled: 1; ++}; ++ ++struct dev_pm_domain { ++ struct dev_pm_ops ops; ++ void (*detach)(struct device *, bool); ++ int (*activate)(struct device *); ++ void (*sync)(struct device *); ++ void (*dismiss)(struct device *); ++}; ++ ++struct iommu_ops; ++ ++struct subsys_private; ++ ++struct bus_type { ++ const char *name; ++ const char *dev_name; ++ struct device *dev_root; ++ const struct attribute_group **bus_groups; ++ const struct attribute_group **dev_groups; ++ const struct attribute_group **drv_groups; ++ int (*match)(struct device *, struct device_driver *); ++ int (*uevent)(struct device *, struct kobj_uevent_env *); ++ int (*probe)(struct device *); ++ int (*remove)(struct device *); ++ void (*shutdown)(struct device *); ++ int (*online)(struct device *); ++ int (*offline)(struct device *); ++ int (*suspend)(struct device *, pm_message_t); ++ int (*resume)(struct device *); ++ int (*num_vf)(struct device *); ++ int (*dma_configure)(struct device *); ++ const struct dev_pm_ops *pm; ++ const struct iommu_ops *iommu_ops; ++ struct subsys_private *p; ++ struct lock_class_key lock_key; ++ bool need_parent_lock; ++}; ++ ++enum probe_type { ++ PROBE_DEFAULT_STRATEGY = 0, ++ PROBE_PREFER_ASYNCHRONOUS = 1, ++ PROBE_FORCE_SYNCHRONOUS = 2, ++}; ++ ++struct of_device_id; ++ ++struct acpi_device_id; ++ ++struct driver_private; ++ ++struct device_driver { ++ const char *name; ++ struct bus_type *bus; ++ struct module *owner; ++ const char *mod_name; ++ bool suppress_bind_attrs; ++ enum probe_type probe_type; ++ const struct of_device_id *of_match_table; ++ const struct acpi_device_id *acpi_match_table; ++ int (*probe)(struct device *); ++ int (*remove)(struct device *); ++ void (*shutdown)(struct device *); ++ int (*suspend)(struct device *, pm_message_t); ++ int (*resume)(struct device *); ++ const struct attribute_group **groups; ++ const struct dev_pm_ops *pm; ++ void (*coredump)(struct device *); ++ struct driver_private *p; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++enum iommu_cap { ++ IOMMU_CAP_CACHE_COHERENCY = 0, ++ IOMMU_CAP_INTR_REMAP = 1, ++ IOMMU_CAP_NOEXEC = 2, ++}; ++ ++enum iommu_attr { ++ DOMAIN_ATTR_GEOMETRY = 0, ++ DOMAIN_ATTR_PAGING = 1, ++ DOMAIN_ATTR_WINDOWS = 2, ++ DOMAIN_ATTR_FSL_PAMU_STASH = 3, ++ 
DOMAIN_ATTR_FSL_PAMU_ENABLE = 4, ++ DOMAIN_ATTR_FSL_PAMUV1 = 5, ++ DOMAIN_ATTR_NESTING = 6, ++ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE = 7, ++ DOMAIN_ATTR_MAX = 8, ++}; ++ ++struct iommu_domain; ++ ++struct iommu_sva_param; ++ ++struct io_mm; ++ ++struct iommu_resv_region; ++ ++struct of_phandle_args; ++ ++struct pasid_table_config; ++ ++struct tlb_invalidate_info; ++ ++struct page_response_msg; ++ ++struct iommu_ops { ++ bool (*capable)(enum iommu_cap); ++ struct iommu_domain * (*domain_alloc)(unsigned int); ++ void (*domain_free)(struct iommu_domain *); ++ int (*attach_dev)(struct iommu_domain *, struct device *); ++ void (*detach_dev)(struct iommu_domain *, struct device *); ++ int (*sva_device_init)(struct device *, struct iommu_sva_param *); ++ void (*sva_device_shutdown)(struct device *, struct iommu_sva_param *); ++ struct io_mm * (*mm_alloc)(struct iommu_domain *, struct mm_struct *, long unsigned int); ++ void (*mm_free)(struct io_mm *); ++ int (*mm_attach)(struct iommu_domain *, struct device *, struct io_mm *, bool); ++ void (*mm_detach)(struct iommu_domain *, struct device *, struct io_mm *, bool); ++ void (*mm_invalidate)(struct iommu_domain *, struct device *, struct io_mm *, long unsigned int, size_t); ++ int (*map)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, int); ++ size_t (*unmap)(struct iommu_domain *, long unsigned int, size_t); ++ void (*flush_iotlb_all)(struct iommu_domain *); ++ void (*iotlb_range_add)(struct iommu_domain *, long unsigned int, size_t); ++ void (*iotlb_sync)(struct iommu_domain *); ++ phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); ++ int (*add_device)(struct device *); ++ void (*remove_device)(struct device *); ++ struct iommu_group * (*device_group)(struct device *); ++ int (*domain_get_attr)(struct iommu_domain *, enum iommu_attr, void *); ++ int (*domain_set_attr)(struct iommu_domain *, enum iommu_attr, void *); ++ void (*get_resv_regions)(struct device *, struct list_head *); ++ void (*put_resv_regions)(struct device *, struct list_head *); ++ void (*apply_resv_region)(struct device *, struct iommu_domain *, struct iommu_resv_region *); ++ int (*domain_window_enable)(struct iommu_domain *, u32, phys_addr_t, u64, int); ++ void (*domain_window_disable)(struct iommu_domain *, u32); ++ int (*domain_set_windows)(struct iommu_domain *, u32); ++ u32 (*domain_get_windows)(struct iommu_domain *); ++ int (*of_xlate)(struct device *, struct of_phandle_args *); ++ bool (*is_attach_deferred)(struct iommu_domain *, struct device *); ++ int (*bind_pasid_table)(struct iommu_domain *, struct device *, struct pasid_table_config *); ++ void (*unbind_pasid_table)(struct iommu_domain *, struct device *); ++ int (*sva_invalidate)(struct iommu_domain *, struct device *, struct tlb_invalidate_info *); ++ int (*page_response)(struct device *, struct page_response_msg *); ++ long unsigned int pgsize_bitmap; ++ int (*device_domain_type)(struct device *, unsigned int *); ++}; ++ ++struct device_type { ++ const char *name; ++ const struct attribute_group **groups; ++ int (*uevent)(struct device *, struct kobj_uevent_env *); ++ char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); ++ void (*release)(struct device *); ++ const struct dev_pm_ops *pm; ++}; ++ ++struct of_device_id { ++ char name[32]; ++ char type[32]; ++ char compatible[128]; ++ const void *data; ++}; ++ ++typedef long unsigned int kernel_ulong_t; ++ ++struct acpi_device_id { ++ __u8 id[9]; ++ kernel_ulong_t driver_data; ++ __u32 cls; ++ __u32 cls_msk; ++}; ++ ++struct 
class { ++ const char *name; ++ struct module *owner; ++ const struct attribute_group **class_groups; ++ const struct attribute_group **dev_groups; ++ struct kobject *dev_kobj; ++ int (*dev_uevent)(struct device *, struct kobj_uevent_env *); ++ char * (*devnode)(struct device *, umode_t *); ++ void (*class_release)(struct class *); ++ void (*dev_release)(struct device *); ++ int (*shutdown_pre)(struct device *); ++ const struct kobj_ns_type_operations *ns_type; ++ const void * (*namespace)(struct device *); ++ void (*get_ownership)(struct device *, kuid_t *, kgid_t *); ++ const struct dev_pm_ops *pm; ++ struct subsys_private *p; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct device_dma_parameters { ++ unsigned int max_segment_size; ++ long unsigned int segment_boundary_mask; ++}; ++ ++enum dma_data_direction { ++ DMA_BIDIRECTIONAL = 0, ++ DMA_TO_DEVICE = 1, ++ DMA_FROM_DEVICE = 2, ++ DMA_NONE = 3, ++}; ++ ++struct sg_table; ++ ++struct scatterlist; ++ ++struct dma_map_ops { ++ void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); ++ void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); ++ int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); ++ int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); ++ dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); ++ void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); ++ void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); ++ dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); ++ void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); ++ void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); ++ void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); ++ void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); ++ int (*mapping_error)(struct device *, dma_addr_t); ++ int (*dma_supported)(struct device *, u64); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++}; ++ ++struct fwnode_operations; ++ ++struct fwnode_handle { ++ struct fwnode_handle *secondary; ++ const struct fwnode_operations *ops; ++}; ++ ++struct rcu_device { ++ struct device dev; ++ struct callback_head callback_head; ++}; ++ ++struct page_ext_operations { ++ size_t offset; ++ size_t size; ++ bool (*need)(); ++ void (*init)(); ++}; ++ ++struct tracepoint_func { ++ void *func; ++ void *data; ++ int prio; ++}; ++ ++struct tracepoint { ++ const char *name; ++ struct static_key key; ++ int (*regfunc)(); ++ void (*unregfunc)(); ++ struct tracepoint_func *funcs; ++}; ++ ++struct 
bpf_raw_event_map { ++ struct tracepoint *tp; ++ void *bpf_func; ++ u32 num_args; ++ long: 32; ++ long: 64; ++}; ++ ++typedef void compound_page_dtor(struct page *); ++ ++struct vm_event_state { ++ long unsigned int event[81]; ++}; ++ ++struct fwnode_reference_args; ++ ++struct fwnode_endpoint; ++ ++struct fwnode_operations { ++ struct fwnode_handle * (*get)(struct fwnode_handle *); ++ void (*put)(struct fwnode_handle *); ++ bool (*device_is_available)(const struct fwnode_handle *); ++ const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); ++ bool (*property_present)(const struct fwnode_handle *, const char *); ++ int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); ++ int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); ++ struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); ++ struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); ++ struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); ++ int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); ++ struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); ++ struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); ++ struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); ++ int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); ++}; ++ ++struct fwnode_endpoint { ++ unsigned int port; ++ unsigned int id; ++ const struct fwnode_handle *local_fwnode; ++}; ++ ++struct fwnode_reference_args { ++ struct fwnode_handle *fwnode; ++ unsigned int nargs; ++ u64 args[8]; ++}; ++ ++struct scatterlist { ++ long unsigned int page_link; ++ unsigned int offset; ++ unsigned int length; ++ dma_addr_t dma_address; ++ unsigned int dma_length; ++}; ++ ++struct sg_table { ++ struct scatterlist *sgl; ++ unsigned int nents; ++ unsigned int orig_nents; ++}; ++ ++struct shared_info; ++ ++struct start_info; ++ ++struct pipe_buf_operations; ++ ++struct pipe_buffer { ++ struct page *page; ++ unsigned int offset; ++ unsigned int len; ++ const struct pipe_buf_operations *ops; ++ unsigned int flags; ++ long unsigned int private; ++}; ++ ++struct pipe_buf_operations { ++ int can_merge; ++ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); ++ void (*release)(struct pipe_inode_info *, struct pipe_buffer *); ++ int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); ++ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); ++}; ++ ++struct nf_conntrack { ++ atomic_t use; ++}; ++ ++struct nf_bridge_info { ++ refcount_t use; ++ enum { ++ BRNF_PROTO_UNCHANGED = 0, ++ BRNF_PROTO_8021Q = 1, ++ BRNF_PROTO_PPPOE = 2, ++ } orig_proto: 8; ++ u8 pkt_otherhost: 1; ++ u8 in_prerouting: 1; ++ u8 bridged_dnat: 1; ++ __u16 frag_max_size; ++ struct net_device *physindev; ++ struct net_device *physoutdev; ++ union { ++ __be32 ipv4_daddr; ++ struct in6_addr ipv6_daddr; ++ char neigh_header[8]; ++ }; ++}; ++ ++struct skb_checksum_ops { ++ __wsum (*update)(const void *, int, __wsum); ++ __wsum (*combine)(__wsum, __wsum, int, int); ++}; ++ ++struct ucounts { ++ struct hlist_node node; ++ struct user_namespace *ns; ++ kuid_t uid; ++ int count; ++ atomic_t ucount[9]; ++}; ++ ++struct net_generic { ++ union { ++ struct { ++ unsigned 
int len; ++ struct callback_head rcu; ++ } s; ++ void *ptr[0]; ++ }; ++}; ++ ++struct ip_vs_kstats { ++ u64 conns; ++ u64 inpkts; ++ u64 outpkts; ++ u64 inbytes; ++ u64 outbytes; ++ u64 cps; ++ u64 inpps; ++ u64 outpps; ++ u64 inbps; ++ u64 outbps; ++}; ++ ++struct ip_vs_estimator { ++ struct list_head list; ++ u64 last_inbytes; ++ u64 last_outbytes; ++ u64 last_conns; ++ u64 last_inpkts; ++ u64 last_outpkts; ++ u64 cps; ++ u64 inpps; ++ u64 outpps; ++ u64 inbps; ++ u64 outbps; ++}; ++ ++struct ip_vs_cpu_stats; ++ ++struct ip_vs_stats { ++ struct ip_vs_kstats kstats; ++ struct ip_vs_estimator est; ++ struct ip_vs_cpu_stats *cpustats; ++ spinlock_t lock; ++ struct ip_vs_kstats kstats0; ++}; ++ ++struct ipvs_sync_daemon_cfg { ++ union nf_inet_addr mcast_group; ++ int syncid; ++ u16 sync_maxlen; ++ u16 mcast_port; ++ u8 mcast_af; ++ u8 mcast_ttl; ++ char mcast_ifn[16]; ++}; ++ ++struct ip_vs_proto_data; ++ ++struct ipvs_master_sync_state; ++ ++struct ip_vs_sync_thread_data; ++ ++struct netns_ipvs { ++ int gen; ++ int enable; ++ struct hlist_head rs_table[16]; ++ struct list_head app_list; ++ struct ip_vs_proto_data *proto_data_table[32]; ++ struct list_head tcp_apps[16]; ++ struct list_head udp_apps[16]; ++ struct list_head sctp_apps[16]; ++ atomic_t conn_count; ++ struct ip_vs_stats tot_stats; ++ int num_services; ++ struct list_head dest_trash; ++ spinlock_t dest_trash_lock; ++ struct timer_list dest_trash_timer; ++ atomic_t ftpsvc_counter; ++ atomic_t nullsvc_counter; ++ atomic_t conn_out_counter; ++ struct delayed_work defense_work; ++ int drop_rate; ++ int drop_counter; ++ int old_secure_tcp; ++ atomic_t dropentry; ++ spinlock_t dropentry_lock; ++ spinlock_t droppacket_lock; ++ spinlock_t securetcp_lock; ++ struct ctl_table_header *sysctl_hdr; ++ struct ctl_table *sysctl_tbl; ++ int sysctl_amemthresh; ++ int sysctl_am_droprate; ++ int sysctl_drop_entry; ++ int sysctl_drop_packet; ++ int sysctl_secure_tcp; ++ int sysctl_conntrack; ++ int sysctl_snat_reroute; ++ int sysctl_sync_ver; ++ int sysctl_sync_ports; ++ int sysctl_sync_persist_mode; ++ long unsigned int sysctl_sync_qlen_max; ++ int sysctl_sync_sock_size; ++ int sysctl_cache_bypass; ++ int sysctl_expire_nodest_conn; ++ int sysctl_sloppy_tcp; ++ int sysctl_sloppy_sctp; ++ int sysctl_expire_quiescent_template; ++ int sysctl_sync_threshold[2]; ++ unsigned int sysctl_sync_refresh_period; ++ int sysctl_sync_retries; ++ int sysctl_nat_icmp_send; ++ int sysctl_pmtu_disc; ++ int sysctl_backup_only; ++ int sysctl_conn_reuse_mode; ++ int sysctl_schedule_icmp; ++ int sysctl_ignore_tunneled; ++ int sysctl_lblc_expiration; ++ struct ctl_table_header *lblc_ctl_header; ++ struct ctl_table *lblc_ctl_table; ++ int sysctl_lblcr_expiration; ++ struct ctl_table_header *lblcr_ctl_header; ++ struct ctl_table *lblcr_ctl_table; ++ struct list_head est_list; ++ spinlock_t est_lock; ++ struct timer_list est_timer; ++ spinlock_t sync_lock; ++ struct ipvs_master_sync_state *ms; ++ spinlock_t sync_buff_lock; ++ struct ip_vs_sync_thread_data *master_tinfo; ++ struct ip_vs_sync_thread_data *backup_tinfo; ++ int threads_mask; ++ volatile int sync_state; ++ struct mutex sync_mutex; ++ struct ipvs_sync_daemon_cfg mcfg; ++ struct ipvs_sync_daemon_cfg bcfg; ++ struct net *net; ++ unsigned int mixed_address_family_dests; ++}; ++ ++struct assoc_array_ptr; ++ ++struct assoc_array { ++ struct assoc_array_ptr *root; ++ long unsigned int nr_leaves_on_tree; ++}; ++ ++typedef int32_t key_serial_t; ++ ++typedef uint32_t key_perm_t; ++ ++struct key_type; ++ ++struct 
keyring_index_key { ++ struct key_type *type; ++ const char *description; ++ size_t desc_len; ++}; ++ ++typedef int (*request_key_actor_t)(struct key *, void *); ++ ++struct key_preparsed_payload; ++ ++struct key_match_data; ++ ++struct key_restriction; ++ ++struct key_type { ++ const char *name; ++ size_t def_datalen; ++ int (*vet_description)(const char *); ++ int (*preparse)(struct key_preparsed_payload *); ++ void (*free_preparse)(struct key_preparsed_payload *); ++ int (*instantiate)(struct key *, struct key_preparsed_payload *); ++ int (*update)(struct key *, struct key_preparsed_payload *); ++ int (*match_preparse)(struct key_match_data *); ++ void (*match_free)(struct key_match_data *); ++ void (*revoke)(struct key *); ++ void (*destroy)(struct key *); ++ void (*describe)(const struct key *, struct seq_file *); ++ long int (*read)(const struct key *, char *, size_t); ++ request_key_actor_t request_key; ++ struct key_restriction * (*lookup_restriction)(const char *); ++ struct list_head link; ++ struct lock_class_key lock_class; ++}; ++ ++union key_payload { ++ void *rcu_data0; ++ void *data[4]; ++}; ++ ++typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); ++ ++struct key_user; ++ ++struct key { ++ refcount_t usage; ++ key_serial_t serial; ++ union { ++ struct list_head graveyard_link; ++ struct rb_node serial_node; ++ }; ++ struct rw_semaphore sem; ++ struct key_user *user; ++ void *security; ++ union { ++ time64_t expiry; ++ time64_t revoked_at; ++ }; ++ time64_t last_used_at; ++ kuid_t uid; ++ kgid_t gid; ++ key_perm_t perm; ++ short unsigned int quotalen; ++ short unsigned int datalen; ++ short int state; ++ long unsigned int flags; ++ union { ++ struct keyring_index_key index_key; ++ struct { ++ struct key_type *type; ++ char *description; ++ }; ++ }; ++ union { ++ union key_payload payload; ++ struct { ++ struct list_head name_link; ++ struct assoc_array keys; ++ }; ++ }; ++ struct key_restriction *restrict_link; ++}; ++ ++struct key_restriction { ++ key_restrict_link_func_t check; ++ struct key *key; ++ struct key_type *keytype; ++}; ++ ++struct group_info { ++ atomic_t usage; ++ int ngroups; ++ kgid_t gid[0]; ++}; ++ ++struct seq_operations { ++ void * (*start)(struct seq_file *, loff_t *); ++ void (*stop)(struct seq_file *, void *); ++ void * (*next)(struct seq_file *, void *, loff_t *); ++ int (*show)(struct seq_file *, void *); ++}; ++ ++struct seq_net_private { ++ struct net *net; ++}; ++ ++struct pernet_operations { ++ struct list_head list; ++ int (*init)(struct net *); ++ void (*exit)(struct net *); ++ void (*exit_batch)(struct list_head *); ++ unsigned int *id; ++ size_t size; ++}; ++ ++typedef __u64 Elf64_Addr; ++ ++typedef __u16 Elf64_Half; ++ ++typedef __u64 Elf64_Off; ++ ++typedef __u32 Elf64_Word; ++ ++typedef __u64 Elf64_Xword; ++ ++typedef __s64 Elf64_Sxword; ++ ++typedef struct { ++ Elf64_Sxword d_tag; ++ union { ++ Elf64_Xword d_val; ++ Elf64_Addr d_ptr; ++ } d_un; ++} Elf64_Dyn; ++ ++struct elf64_sym { ++ Elf64_Word st_name; ++ unsigned char st_info; ++ unsigned char st_other; ++ Elf64_Half st_shndx; ++ Elf64_Addr st_value; ++ Elf64_Xword st_size; ++}; ++ ++struct elf64_hdr { ++ unsigned char e_ident[16]; ++ Elf64_Half e_type; ++ Elf64_Half e_machine; ++ Elf64_Word e_version; ++ Elf64_Addr e_entry; ++ Elf64_Off e_phoff; ++ Elf64_Off e_shoff; ++ Elf64_Word e_flags; ++ Elf64_Half e_ehsize; ++ Elf64_Half e_phentsize; ++ Elf64_Half e_phnum; ++ Elf64_Half e_shentsize; ++ Elf64_Half e_shnum; ++ 
Elf64_Half e_shstrndx; ++}; ++ ++typedef struct elf64_hdr Elf64_Ehdr; ++ ++struct elf64_shdr { ++ Elf64_Word sh_name; ++ Elf64_Word sh_type; ++ Elf64_Xword sh_flags; ++ Elf64_Addr sh_addr; ++ Elf64_Off sh_offset; ++ Elf64_Xword sh_size; ++ Elf64_Word sh_link; ++ Elf64_Word sh_info; ++ Elf64_Xword sh_addralign; ++ Elf64_Xword sh_entsize; ++}; ++ ++typedef struct elf64_shdr Elf64_Shdr; ++ ++struct kernel_param_ops { ++ unsigned int flags; ++ int (*set)(const char *, const struct kernel_param *); ++ int (*get)(char *, const struct kernel_param *); ++ void (*free)(void *); ++}; ++ ++struct kparam_string; ++ ++struct kparam_array; ++ ++struct kernel_param { ++ const char *name; ++ struct module *mod; ++ const struct kernel_param_ops *ops; ++ const u16 perm; ++ s8 level; ++ u8 flags; ++ union { ++ void *arg; ++ const struct kparam_string *str; ++ const struct kparam_array *arr; ++ }; ++}; ++ ++struct kparam_string { ++ unsigned int maxlen; ++ char *string; ++}; ++ ++struct kparam_array { ++ unsigned int max; ++ unsigned int elemsize; ++ unsigned int *num; ++ const struct kernel_param_ops *ops; ++ void *elem; ++}; ++ ++struct plt_entry { ++ __le32 mov0; ++ __le32 mov1; ++ __le32 mov2; ++ __le32 br; ++}; ++ ++struct module_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ++ ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); ++ void (*setup)(struct module *, const char *); ++ int (*test)(struct module *); ++ void (*free)(struct module *); ++}; ++ ++struct klp_modinfo { ++ Elf64_Ehdr hdr; ++ Elf64_Shdr *sechdrs; ++ char *secstrings; ++ unsigned int symndx; ++}; ++ ++struct __una_u32 { ++ u32 x; ++}; ++ ++typedef struct { ++ unsigned int clock_rate; ++ unsigned int clock_type; ++ short unsigned int loopback; ++} sync_serial_settings; ++ ++typedef struct { ++ unsigned int clock_rate; ++ unsigned int clock_type; ++ short unsigned int loopback; ++ unsigned int slot_map; ++} te1_settings; ++ ++typedef struct { ++ short unsigned int encoding; ++ short unsigned int parity; ++} raw_hdlc_proto; ++ ++typedef struct { ++ unsigned int t391; ++ unsigned int t392; ++ unsigned int n391; ++ unsigned int n392; ++ unsigned int n393; ++ short unsigned int lmi; ++ short unsigned int dce; ++} fr_proto; ++ ++typedef struct { ++ unsigned int dlci; ++} fr_proto_pvc; ++ ++typedef struct { ++ unsigned int dlci; ++ char master[16]; ++} fr_proto_pvc_info; ++ ++typedef struct { ++ unsigned int interval; ++ unsigned int timeout; ++} cisco_proto; ++ ++struct ifmap { ++ long unsigned int mem_start; ++ long unsigned int mem_end; ++ short unsigned int base_addr; ++ unsigned char irq; ++ unsigned char dma; ++ unsigned char port; ++}; ++ ++struct if_settings { ++ unsigned int type; ++ unsigned int size; ++ union { ++ raw_hdlc_proto *raw_hdlc; ++ cisco_proto *cisco; ++ fr_proto *fr; ++ fr_proto_pvc *fr_pvc; ++ fr_proto_pvc_info *fr_pvc_info; ++ sync_serial_settings *sync; ++ te1_settings *te1; ++ } ifs_ifsu; ++}; ++ ++struct ifreq { ++ union { ++ char ifrn_name[16]; ++ } ifr_ifrn; ++ union { ++ struct sockaddr ifru_addr; ++ struct sockaddr ifru_dstaddr; ++ struct sockaddr ifru_broadaddr; ++ struct sockaddr ifru_netmask; ++ struct sockaddr ifru_hwaddr; ++ short int ifru_flags; ++ int ifru_ivalue; ++ int ifru_mtu; ++ struct ifmap ifru_map; ++ char ifru_slave[16]; ++ char ifru_newname[16]; ++ void *ifru_data; ++ struct if_settings ifru_settings; ++ } ifr_ifru; ++}; ++ ++struct dql { ++ unsigned int num_queued; ++ unsigned int 
adj_limit; ++ unsigned int last_obj_cnt; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ unsigned int limit; ++ unsigned int num_completed; ++ unsigned int prev_ovlimit; ++ unsigned int prev_num_queued; ++ unsigned int prev_last_obj_cnt; ++ unsigned int lowest_slack; ++ long unsigned int slack_start_time; ++ unsigned int max_limit; ++ unsigned int min_limit; ++ unsigned int slack_hold_time; ++ long: 32; ++ long: 64; ++ long: 64; ++}; ++ ++struct ethtool_cmd { ++ __u32 cmd; ++ __u32 supported; ++ __u32 advertising; ++ __u16 speed; ++ __u8 duplex; ++ __u8 port; ++ __u8 phy_address; ++ __u8 transceiver; ++ __u8 autoneg; ++ __u8 mdio_support; ++ __u32 maxtxpkt; ++ __u32 maxrxpkt; ++ __u16 speed_hi; ++ __u8 eth_tp_mdix; ++ __u8 eth_tp_mdix_ctrl; ++ __u32 lp_advertising; ++ __u32 reserved[2]; ++}; ++ ++struct ethtool_drvinfo { ++ __u32 cmd; ++ char driver[32]; ++ char version[32]; ++ char fw_version[32]; ++ char bus_info[32]; ++ char erom_version[32]; ++ char reserved2[12]; ++ __u32 n_priv_flags; ++ __u32 n_stats; ++ __u32 testinfo_len; ++ __u32 eedump_len; ++ __u32 regdump_len; ++}; ++ ++struct ethtool_wolinfo { ++ __u32 cmd; ++ __u32 supported; ++ __u32 wolopts; ++ __u8 sopass[6]; ++}; ++ ++struct ethtool_tunable { ++ __u32 cmd; ++ __u32 id; ++ __u32 type_id; ++ __u32 len; ++ void *data[0]; ++}; ++ ++struct ethtool_regs { ++ __u32 cmd; ++ __u32 version; ++ __u32 len; ++ __u8 data[0]; ++}; ++ ++struct ethtool_eeprom { ++ __u32 cmd; ++ __u32 magic; ++ __u32 offset; ++ __u32 len; ++ __u8 data[0]; ++}; ++ ++struct ethtool_eee { ++ __u32 cmd; ++ __u32 supported; ++ __u32 advertised; ++ __u32 lp_advertised; ++ __u32 eee_active; ++ __u32 eee_enabled; ++ __u32 tx_lpi_enabled; ++ __u32 tx_lpi_timer; ++ __u32 reserved[2]; ++}; ++ ++struct ethtool_modinfo { ++ __u32 cmd; ++ __u32 type; ++ __u32 eeprom_len; ++ __u32 reserved[8]; ++}; ++ ++struct ethtool_coalesce { ++ __u32 cmd; ++ __u32 rx_coalesce_usecs; ++ __u32 rx_max_coalesced_frames; ++ __u32 rx_coalesce_usecs_irq; ++ __u32 rx_max_coalesced_frames_irq; ++ __u32 tx_coalesce_usecs; ++ __u32 tx_max_coalesced_frames; ++ __u32 tx_coalesce_usecs_irq; ++ __u32 tx_max_coalesced_frames_irq; ++ __u32 stats_block_coalesce_usecs; ++ __u32 use_adaptive_rx_coalesce; ++ __u32 use_adaptive_tx_coalesce; ++ __u32 pkt_rate_low; ++ __u32 rx_coalesce_usecs_low; ++ __u32 rx_max_coalesced_frames_low; ++ __u32 tx_coalesce_usecs_low; ++ __u32 tx_max_coalesced_frames_low; ++ __u32 pkt_rate_high; ++ __u32 rx_coalesce_usecs_high; ++ __u32 rx_max_coalesced_frames_high; ++ __u32 tx_coalesce_usecs_high; ++ __u32 tx_max_coalesced_frames_high; ++ __u32 rate_sample_interval; ++}; ++ ++struct ethtool_ringparam { ++ __u32 cmd; ++ __u32 rx_max_pending; ++ __u32 rx_mini_max_pending; ++ __u32 rx_jumbo_max_pending; ++ __u32 tx_max_pending; ++ __u32 rx_pending; ++ __u32 rx_mini_pending; ++ __u32 rx_jumbo_pending; ++ __u32 tx_pending; ++}; ++ ++struct ethtool_channels { ++ __u32 cmd; ++ __u32 max_rx; ++ __u32 max_tx; ++ __u32 max_other; ++ __u32 max_combined; ++ __u32 rx_count; ++ __u32 tx_count; ++ __u32 other_count; ++ __u32 combined_count; ++}; ++ ++struct ethtool_pauseparam { ++ __u32 cmd; ++ __u32 autoneg; ++ __u32 rx_pause; ++ __u32 tx_pause; ++}; ++ ++struct ethtool_test { ++ __u32 cmd; ++ __u32 flags; ++ __u32 reserved; ++ __u32 len; ++ __u64 data[0]; ++}; ++ ++struct ethtool_stats { ++ __u32 cmd; ++ __u32 n_stats; ++ __u64 data[0]; ++}; ++ ++struct ethtool_tcpip4_spec { ++ __be32 ip4src; ++ __be32 ip4dst; ++ __be16 psrc; ++ __be16 pdst; 
++ __u8 tos; ++}; ++ ++struct ethtool_ah_espip4_spec { ++ __be32 ip4src; ++ __be32 ip4dst; ++ __be32 spi; ++ __u8 tos; ++}; ++ ++struct ethtool_usrip4_spec { ++ __be32 ip4src; ++ __be32 ip4dst; ++ __be32 l4_4_bytes; ++ __u8 tos; ++ __u8 ip_ver; ++ __u8 proto; ++}; ++ ++struct ethtool_tcpip6_spec { ++ __be32 ip6src[4]; ++ __be32 ip6dst[4]; ++ __be16 psrc; ++ __be16 pdst; ++ __u8 tclass; ++}; ++ ++struct ethtool_ah_espip6_spec { ++ __be32 ip6src[4]; ++ __be32 ip6dst[4]; ++ __be32 spi; ++ __u8 tclass; ++}; ++ ++struct ethtool_usrip6_spec { ++ __be32 ip6src[4]; ++ __be32 ip6dst[4]; ++ __be32 l4_4_bytes; ++ __u8 tclass; ++ __u8 l4_proto; ++}; ++ ++union ethtool_flow_union { ++ struct ethtool_tcpip4_spec tcp_ip4_spec; ++ struct ethtool_tcpip4_spec udp_ip4_spec; ++ struct ethtool_tcpip4_spec sctp_ip4_spec; ++ struct ethtool_ah_espip4_spec ah_ip4_spec; ++ struct ethtool_ah_espip4_spec esp_ip4_spec; ++ struct ethtool_usrip4_spec usr_ip4_spec; ++ struct ethtool_tcpip6_spec tcp_ip6_spec; ++ struct ethtool_tcpip6_spec udp_ip6_spec; ++ struct ethtool_tcpip6_spec sctp_ip6_spec; ++ struct ethtool_ah_espip6_spec ah_ip6_spec; ++ struct ethtool_ah_espip6_spec esp_ip6_spec; ++ struct ethtool_usrip6_spec usr_ip6_spec; ++ struct ethhdr ether_spec; ++ __u8 hdata[52]; ++}; ++ ++struct ethtool_flow_ext { ++ __u8 padding[2]; ++ unsigned char h_dest[6]; ++ __be16 vlan_etype; ++ __be16 vlan_tci; ++ __be32 data[2]; ++}; ++ ++struct ethtool_rx_flow_spec { ++ __u32 flow_type; ++ union ethtool_flow_union h_u; ++ struct ethtool_flow_ext h_ext; ++ union ethtool_flow_union m_u; ++ struct ethtool_flow_ext m_ext; ++ __u64 ring_cookie; ++ __u32 location; ++}; ++ ++struct ethtool_rxnfc { ++ __u32 cmd; ++ __u32 flow_type; ++ __u64 data; ++ struct ethtool_rx_flow_spec fs; ++ union { ++ __u32 rule_cnt; ++ __u32 rss_context; ++ }; ++ __u32 rule_locs[0]; ++}; ++ ++struct ethtool_flash { ++ __u32 cmd; ++ __u32 region; ++ char data[128]; ++}; ++ ++struct ethtool_dump { ++ __u32 cmd; ++ __u32 version; ++ __u32 flag; ++ __u32 len; ++ __u8 data[0]; ++}; ++ ++struct ethtool_ts_info { ++ __u32 cmd; ++ __u32 so_timestamping; ++ __s32 phc_index; ++ __u32 tx_types; ++ __u32 tx_reserved[3]; ++ __u32 rx_filters; ++ __u32 rx_reserved[3]; ++}; ++ ++struct ethtool_fecparam { ++ __u32 cmd; ++ __u32 active_fec; ++ __u32 fec; ++ __u32 reserved; ++}; ++ ++struct ethtool_link_settings { ++ __u32 cmd; ++ __u32 speed; ++ __u8 duplex; ++ __u8 port; ++ __u8 phy_address; ++ __u8 autoneg; ++ __u8 mdio_support; ++ __u8 eth_tp_mdix; ++ __u8 eth_tp_mdix_ctrl; ++ __s8 link_mode_masks_nwords; ++ __u8 transceiver; ++ __u8 reserved1[3]; ++ __u32 reserved[7]; ++ __u32 link_mode_masks[0]; ++}; ++ ++enum ethtool_phys_id_state { ++ ETHTOOL_ID_INACTIVE = 0, ++ ETHTOOL_ID_ACTIVE = 1, ++ ETHTOOL_ID_ON = 2, ++ ETHTOOL_ID_OFF = 3, ++}; ++ ++struct ethtool_link_ksettings { ++ struct ethtool_link_settings base; ++ struct { ++ long unsigned int supported[1]; ++ long unsigned int advertising[1]; ++ long unsigned int lp_advertising[1]; ++ } link_modes; ++}; ++ ++struct ethtool_ops { ++ int (*get_settings)(struct net_device *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device *); ++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device *); ++ 
void (*set_msglevel)(struct net_device *, u32); ++ int (*nway_reset)(struct net_device *); ++ u32 (*get_link)(struct net_device *); ++ int (*get_eeprom_len)(struct net_device *); ++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); ++ int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); ++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device *, u32, u8 *); ++ int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); ++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); ++ int (*begin)(struct net_device *); ++ void (*complete)(struct net_device *); ++ u32 (*get_priv_flags)(struct net_device *); ++ int (*set_priv_flags)(struct net_device *, u32); ++ int (*get_sset_count)(struct net_device *, int); ++ int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); ++ int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); ++ int (*flash_device)(struct net_device *, struct ethtool_flash *); ++ int (*reset)(struct net_device *, u32 *); ++ u32 (*get_rxfh_key_size)(struct net_device *); ++ u32 (*get_rxfh_indir_size)(struct net_device *); ++ int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); ++ int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8); ++ int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32); ++ int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool); ++ void (*get_channels)(struct net_device *, struct ethtool_channels *); ++ int (*set_channels)(struct net_device *, struct ethtool_channels *); ++ int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); ++ int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); ++ int (*set_dump)(struct net_device *, struct ethtool_dump *); ++ int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); ++ int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); ++ int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_eee)(struct net_device *, struct ethtool_eee *); ++ int (*set_eee)(struct net_device *, struct ethtool_eee *); ++ int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); ++ int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); ++ int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); ++ int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); ++ int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); ++ int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); ++ int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); ++ int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); ++ void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ 
long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long unsigned int kabi_reserved31; ++ long unsigned int kabi_reserved32; ++}; ++ ++struct ieee_ets { ++ __u8 willing; ++ __u8 ets_cap; ++ __u8 cbs; ++ __u8 tc_tx_bw[8]; ++ __u8 tc_rx_bw[8]; ++ __u8 tc_tsa[8]; ++ __u8 prio_tc[8]; ++ __u8 tc_reco_bw[8]; ++ __u8 tc_reco_tsa[8]; ++ __u8 reco_prio_tc[8]; ++}; ++ ++struct ieee_maxrate { ++ __u64 tc_maxrate[8]; ++}; ++ ++struct ieee_qcn { ++ __u8 rpg_enable[8]; ++ __u32 rppp_max_rps[8]; ++ __u32 rpg_time_reset[8]; ++ __u32 rpg_byte_reset[8]; ++ __u32 rpg_threshold[8]; ++ __u32 rpg_max_rate[8]; ++ __u32 rpg_ai_rate[8]; ++ __u32 rpg_hai_rate[8]; ++ __u32 rpg_gd[8]; ++ __u32 rpg_min_dec_fac[8]; ++ __u32 rpg_min_rate[8]; ++ __u32 cndd_state_machine[8]; ++}; ++ ++struct ieee_qcn_stats { ++ __u64 rppp_rp_centiseconds[8]; ++ __u32 rppp_created_rps[8]; ++}; ++ ++struct ieee_pfc { ++ __u8 pfc_cap; ++ __u8 pfc_en; ++ __u8 mbc; ++ __u16 delay; ++ __u64 requests[8]; ++ __u64 indications[8]; ++}; ++ ++struct dcbnl_buffer { ++ __u8 prio2buffer[8]; ++ __u32 buffer_size[8]; ++ __u32 total_size; ++}; ++ ++struct cee_pg { ++ __u8 willing; ++ __u8 error; ++ __u8 pg_en; ++ __u8 tcs_supported; ++ __u8 pg_bw[8]; ++ __u8 prio_pg[8]; ++}; ++ ++struct cee_pfc { ++ __u8 willing; ++ __u8 error; ++ __u8 pfc_en; ++ __u8 tcs_supported; ++}; ++ ++struct dcb_app { ++ __u8 selector; ++ __u8 priority; ++ __u16 protocol; ++}; ++ ++struct dcb_peer_app_info { ++ __u8 willing; ++ __u8 error; ++}; ++ ++struct dcbnl_rtnl_ops { ++ int (*ieee_getets)(struct net_device *, struct ieee_ets *); ++ int (*ieee_setets)(struct net_device *, struct ieee_ets *); ++ int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); ++ int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); ++ int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); ++ int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); ++ int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); ++ int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); ++ int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); ++ int (*ieee_getapp)(struct net_device *, struct dcb_app *); ++ int (*ieee_setapp)(struct net_device *, struct dcb_app *); ++ int (*ieee_delapp)(struct net_device *, struct dcb_app *); ++ int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); ++ int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); ++ u8 (*getstate)(struct net_device *); ++ u8 (*setstate)(struct net_device *, u8); ++ void (*getpermhwaddr)(struct net_device *, u8 *); ++ void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, 
u8); ++ void (*setpgbwgcfgtx)(struct net_device *, int, u8); ++ void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); ++ void (*setpgbwgcfgrx)(struct net_device *, int, u8); ++ void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); ++ void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); ++ void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); ++ void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); ++ void (*setpfccfg)(struct net_device *, int, u8); ++ void (*getpfccfg)(struct net_device *, int, u8 *); ++ u8 (*setall)(struct net_device *); ++ u8 (*getcap)(struct net_device *, int, u8 *); ++ int (*getnumtcs)(struct net_device *, int, u8 *); ++ int (*setnumtcs)(struct net_device *, int, u8); ++ u8 (*getpfcstate)(struct net_device *); ++ void (*setpfcstate)(struct net_device *, u8); ++ void (*getbcncfg)(struct net_device *, int, u32 *); ++ void (*setbcncfg)(struct net_device *, int, u32); ++ void (*getbcnrp)(struct net_device *, int, u8 *); ++ void (*setbcnrp)(struct net_device *, int, u8); ++ int (*setapp)(struct net_device *, u8, u16, u8); ++ int (*getapp)(struct net_device *, u8, u16); ++ u8 (*getfeatcfg)(struct net_device *, int, u8 *); ++ u8 (*setfeatcfg)(struct net_device *, int, u8); ++ u8 (*getdcbx)(struct net_device *); ++ u8 (*setdcbx)(struct net_device *, u8); ++ int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); ++ int (*peer_getapptable)(struct net_device *, struct dcb_app *); ++ int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); ++ int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); ++ int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); ++ int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++}; ++ ++struct taskstats { ++ __u16 version; ++ __u32 ac_exitcode; ++ __u8 ac_flag; ++ __u8 ac_nice; ++ __u64 cpu_count; ++ __u64 cpu_delay_total; ++ __u64 blkio_count; ++ __u64 blkio_delay_total; ++ __u64 swapin_count; ++ __u64 swapin_delay_total; ++ __u64 cpu_run_real_total; ++ __u64 cpu_run_virtual_total; ++ char ac_comm[32]; ++ __u8 ac_sched; ++ __u8 ac_pad[3]; ++ int: 32; ++ __u32 ac_uid; ++ __u32 ac_gid; ++ __u32 ac_pid; ++ __u32 ac_ppid; ++ __u32 ac_btime; ++ __u64 ac_etime; ++ __u64 ac_utime; ++ __u64 ac_stime; ++ __u64 ac_minflt; ++ __u64 ac_majflt; ++ __u64 coremem; ++ __u64 virtmem; ++ __u64 hiwater_rss; ++ __u64 hiwater_vm; ++ __u64 read_char; ++ __u64 write_char; ++ __u64 read_syscalls; ++ __u64 write_syscalls; ++ __u64 read_bytes; ++ __u64 write_bytes; ++ __u64 cancelled_write_bytes; ++ __u64 nvcsw; ++ __u64 nivcsw; ++ __u64 ac_utimescaled; ++ __u64 ac_stimescaled; ++ __u64 cpu_scaled_run_real_total; ++ __u64 freepages_count; ++ __u64 freepages_delay_total; ++}; ++ ++struct cgroup_namespace { ++ refcount_t count; ++ struct ns_common ns; ++ struct user_namespace *user_ns; ++ struct ucounts *ucounts; ++ struct css_set *root_cset; ++}; ++ ++struct kernel_cpustat { ++ u64 cpustat[12]; ++}; ++ ++struct 
kernel_stat { ++ long unsigned int irqs_sum; ++ unsigned int softirqs[10]; ++}; ++ ++struct bpf_insn { ++ __u8 code; ++ __u8 dst_reg: 4; ++ __u8 src_reg: 4; ++ __s16 off; ++ __s32 imm; ++}; ++ ++enum bpf_prog_type { ++ BPF_PROG_TYPE_UNSPEC = 0, ++ BPF_PROG_TYPE_SOCKET_FILTER = 1, ++ BPF_PROG_TYPE_KPROBE = 2, ++ BPF_PROG_TYPE_SCHED_CLS = 3, ++ BPF_PROG_TYPE_SCHED_ACT = 4, ++ BPF_PROG_TYPE_TRACEPOINT = 5, ++ BPF_PROG_TYPE_XDP = 6, ++ BPF_PROG_TYPE_PERF_EVENT = 7, ++ BPF_PROG_TYPE_CGROUP_SKB = 8, ++ BPF_PROG_TYPE_CGROUP_SOCK = 9, ++ BPF_PROG_TYPE_LWT_IN = 10, ++ BPF_PROG_TYPE_LWT_OUT = 11, ++ BPF_PROG_TYPE_LWT_XMIT = 12, ++ BPF_PROG_TYPE_SOCK_OPS = 13, ++ BPF_PROG_TYPE_SK_SKB = 14, ++ BPF_PROG_TYPE_CGROUP_DEVICE = 15, ++ BPF_PROG_TYPE_SK_MSG = 16, ++ BPF_PROG_TYPE_RAW_TRACEPOINT = 17, ++ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, ++ BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, ++ BPF_PROG_TYPE_LIRC_MODE2 = 20, ++ BPF_PROG_TYPE_SK_REUSEPORT = 21, ++}; ++ ++enum bpf_attach_type { ++ BPF_CGROUP_INET_INGRESS = 0, ++ BPF_CGROUP_INET_EGRESS = 1, ++ BPF_CGROUP_INET_SOCK_CREATE = 2, ++ BPF_CGROUP_SOCK_OPS = 3, ++ BPF_SK_SKB_STREAM_PARSER = 4, ++ BPF_SK_SKB_STREAM_VERDICT = 5, ++ BPF_CGROUP_DEVICE = 6, ++ BPF_SK_MSG_VERDICT = 7, ++ BPF_CGROUP_INET4_BIND = 8, ++ BPF_CGROUP_INET6_BIND = 9, ++ BPF_CGROUP_INET4_CONNECT = 10, ++ BPF_CGROUP_INET6_CONNECT = 11, ++ BPF_CGROUP_INET4_POST_BIND = 12, ++ BPF_CGROUP_INET6_POST_BIND = 13, ++ BPF_CGROUP_UDP4_SENDMSG = 14, ++ BPF_CGROUP_UDP6_SENDMSG = 15, ++ BPF_LIRC_MODE2 = 16, ++ BPF_CGROUP_UDP4_RECVMSG = 19, ++ BPF_CGROUP_UDP6_RECVMSG = 20, ++ __MAX_BPF_ATTACH_TYPE = 21, ++}; ++ ++struct sock_filter { ++ __u16 code; ++ __u8 jt; ++ __u8 jf; ++ __u32 k; ++}; ++ ++struct bpf_prog_aux; ++ ++struct sock_fprog_kern; ++ ++struct bpf_prog { ++ u16 pages; ++ u16 jited: 1; ++ u16 jit_requested: 1; ++ u16 undo_set_mem: 1; ++ u16 gpl_compatible: 1; ++ u16 cb_access: 1; ++ u16 dst_needed: 1; ++ u16 blinded: 1; ++ u16 is_func: 1; ++ u16 kprobe_override: 1; ++ u16 has_callchain_buf: 1; ++ enum bpf_prog_type type; ++ enum bpf_attach_type expected_attach_type; ++ u32 len; ++ u32 jited_len; ++ u8 tag[8]; ++ struct bpf_prog_aux *aux; ++ struct sock_fprog_kern *orig_prog; ++ unsigned int (*bpf_func)(const void *, const struct bpf_insn *); ++ union { ++ struct sock_filter insns[0]; ++ struct bpf_insn insnsi[0]; ++ }; ++}; ++ ++struct bpf_prog_array; ++ ++struct cgroup_bpf { ++ struct bpf_prog_array *effective[21]; ++ struct list_head progs[21]; ++ u32 flags[21]; ++ struct bpf_prog_array *inactive; ++}; ++ ++struct cgroup_base_stat { ++ struct task_cputime cputime; ++}; ++ ++struct cgroup_root; ++ ++struct cgroup_rstat_cpu; ++ ++struct cgroup { ++ struct cgroup_subsys_state self; ++ long unsigned int flags; ++ int id; ++ int level; ++ int max_depth; ++ int nr_descendants; ++ int nr_dying_descendants; ++ int max_descendants; ++ int nr_populated_csets; ++ int nr_populated_domain_children; ++ int nr_populated_threaded_children; ++ int nr_threaded_children; ++ struct kernfs_node *kn; ++ struct cgroup_file procs_file; ++ struct cgroup_file events_file; ++ u16 subtree_control; ++ u16 subtree_ss_mask; ++ u16 old_subtree_control; ++ u16 old_subtree_ss_mask; ++ struct cgroup_subsys_state *subsys[14]; ++ struct cgroup_root *root; ++ struct list_head cset_links; ++ struct list_head e_csets[14]; ++ struct cgroup *dom_cgrp; ++ struct cgroup *old_dom_cgrp; ++ struct cgroup_rstat_cpu *rstat_cpu; ++ struct list_head rstat_css_list; ++ struct cgroup_base_stat pending_bstat; ++ struct cgroup_base_stat bstat; ++ struct 
prev_cputime prev_cputime; ++ struct list_head pidlists; ++ struct mutex pidlist_mutex; ++ wait_queue_head_t offline_waitq; ++ struct work_struct release_agent_work; ++ struct cgroup_bpf bpf; ++ atomic_t congestion_count; ++ int ancestor_ids[0]; ++}; ++ ++struct cgroup_taskset; ++ ++struct cftype; ++ ++struct cgroup_subsys { ++ struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); ++ int (*css_online)(struct cgroup_subsys_state *); ++ void (*css_offline)(struct cgroup_subsys_state *); ++ void (*css_released)(struct cgroup_subsys_state *); ++ void (*css_free)(struct cgroup_subsys_state *); ++ void (*css_reset)(struct cgroup_subsys_state *); ++ void (*css_rstat_flush)(struct cgroup_subsys_state *, int); ++ int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); ++ int (*can_attach)(struct cgroup_taskset *); ++ void (*cancel_attach)(struct cgroup_taskset *); ++ void (*attach)(struct cgroup_taskset *); ++ void (*post_attach)(); ++ int (*can_fork)(struct task_struct *); ++ void (*cancel_fork)(struct task_struct *); ++ void (*fork)(struct task_struct *); ++ void (*exit)(struct task_struct *); ++ void (*release)(struct task_struct *); ++ void (*bind)(struct cgroup_subsys_state *); ++ bool early_init: 1; ++ bool implicit_on_dfl: 1; ++ bool threaded: 1; ++ bool broken_hierarchy: 1; ++ bool warned_broken_hierarchy: 1; ++ int id; ++ const char *name; ++ const char *legacy_name; ++ struct cgroup_root *root; ++ struct idr css_idr; ++ struct list_head cfts; ++ struct cftype *dfl_cftypes; ++ struct cftype *legacy_cftypes; ++ unsigned int depends_on; ++}; ++ ++struct cgroup_rstat_cpu { ++ struct u64_stats_sync bsync; ++ struct cgroup_base_stat bstat; ++ struct cgroup_base_stat last_bstat; ++ struct cgroup *updated_children; ++ struct cgroup *updated_next; ++}; ++ ++struct cgroup_root { ++ struct kernfs_root *kf_root; ++ unsigned int subsys_mask; ++ int hierarchy_id; ++ struct cgroup cgrp; ++ int cgrp_ancestor_id_storage; ++ atomic_t nr_cgrps; ++ struct list_head root_list; ++ unsigned int flags; ++ struct idr cgroup_idr; ++ char release_agent_path[4096]; ++ char name[64]; ++}; ++ ++struct cftype { ++ char name[64]; ++ long unsigned int private; ++ size_t max_write_len; ++ unsigned int flags; ++ unsigned int file_offset; ++ struct cgroup_subsys *ss; ++ struct list_head node; ++ struct kernfs_ops *kf_ops; ++ int (*open)(struct kernfs_open_file *); ++ void (*release)(struct kernfs_open_file *); ++ u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); ++ s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); ++ int (*seq_show)(struct seq_file *, void *); ++ void * (*seq_start)(struct seq_file *, loff_t *); ++ void * (*seq_next)(struct seq_file *, void *, loff_t *); ++ void (*seq_stop)(struct seq_file *, void *); ++ int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); ++ int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); ++ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); ++}; ++ ++struct netprio_map { ++ struct callback_head rcu; ++ u32 priomap_len; ++ u32 priomap[0]; ++}; ++ ++struct xdp_mem_info { ++ u32 type; ++ u32 id; ++}; ++ ++struct xdp_rxq_info { ++ struct net_device *dev; ++ u32 queue_index; ++ u32 reg_state; ++ struct xdp_mem_info mem; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct xdp_frame { ++ void *data; ++ u16 len; ++ u16 headroom; ++ u16 metasize; ++ struct xdp_mem_info mem; ++ struct net_device *dev_rx; ++}; ++ ++struct nlmsghdr { ++ __u32 nlmsg_len; ++ __u16 nlmsg_type; ++ __u16 nlmsg_flags; ++ __u32 nlmsg_seq; ++ __u32 nlmsg_pid; ++}; ++ ++struct nlattr { ++ __u16 nla_len; ++ __u16 nla_type; ++}; ++ ++struct netlink_ext_ack { ++ const char *_msg; ++ const struct nlattr *bad_attr; ++ u8 cookie[20]; ++ u8 cookie_len; ++}; ++ ++struct netlink_callback { ++ struct sk_buff *skb; ++ const struct nlmsghdr *nlh; ++ int (*dump)(struct sk_buff *, struct netlink_callback *); ++ int (*done)(struct netlink_callback *); ++ void *data; ++ struct module *module; ++ u16 family; ++ u16 min_dump_alloc; ++ unsigned int prev_seq; ++ unsigned int seq; ++ long int args[6]; ++}; ++ ++struct ndmsg { ++ __u8 ndm_family; ++ __u8 ndm_pad1; ++ __u16 ndm_pad2; ++ __s32 ndm_ifindex; ++ __u16 ndm_state; ++ __u8 ndm_flags; ++ __u8 ndm_type; ++}; ++ ++struct rtnl_link_stats64 { ++ __u64 rx_packets; ++ __u64 tx_packets; ++ __u64 rx_bytes; ++ __u64 tx_bytes; ++ __u64 rx_errors; ++ __u64 tx_errors; ++ __u64 rx_dropped; ++ __u64 tx_dropped; ++ __u64 multicast; ++ __u64 collisions; ++ __u64 rx_length_errors; ++ __u64 rx_over_errors; ++ __u64 rx_crc_errors; ++ __u64 rx_frame_errors; ++ __u64 rx_fifo_errors; ++ __u64 rx_missed_errors; ++ __u64 tx_aborted_errors; ++ __u64 tx_carrier_errors; ++ __u64 tx_fifo_errors; ++ __u64 tx_heartbeat_errors; ++ __u64 tx_window_errors; ++ __u64 rx_compressed; ++ __u64 tx_compressed; ++ __u64 rx_nohandler; ++}; ++ ++struct ifla_vf_stats { ++ __u64 rx_packets; ++ __u64 tx_packets; ++ __u64 rx_bytes; ++ __u64 tx_bytes; ++ __u64 broadcast; ++ __u64 multicast; ++ __u64 rx_dropped; ++ __u64 tx_dropped; ++}; ++ ++struct ifla_vf_info { ++ __u32 vf; ++ __u8 mac[32]; ++ __u32 vlan; ++ __u32 qos; ++ __u32 spoofchk; ++ __u32 linkstate; ++ __u32 min_tx_rate; ++ __u32 max_tx_rate; ++ __u32 rss_query_en; ++ __u32 trusted; ++ __be16 vlan_proto; ++}; ++ ++struct tc_stats { ++ __u64 bytes; ++ __u32 packets; ++ __u32 drops; ++ __u32 overlimits; ++ __u32 bps; ++ __u32 pps; ++ __u32 qlen; ++ __u32 backlog; ++}; ++ ++struct tc_sizespec { ++ unsigned char cell_log; ++ unsigned char size_log; ++ short int cell_align; ++ int overhead; ++ unsigned int linklayer; ++ unsigned int mpu; ++ unsigned int mtu; ++ unsigned int tsize; ++}; ++ ++enum netdev_tx { ++ __NETDEV_TX_MIN = 2147483648, ++ NETDEV_TX_OK = 0, ++ NETDEV_TX_BUSY = 16, ++}; ++ ++typedef enum netdev_tx netdev_tx_t; ++ ++struct header_ops { ++ int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); ++ int (*parse)(const struct sk_buff *, unsigned char *); ++ int (*cache)(const struct neighbour *, struct hh_cache *, __be16); ++ void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); ++ bool (*validate)(const char *, unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++}; ++ ++struct gro_list { ++ struct list_head list; ++ int count; ++}; ++ ++struct napi_struct { ++ struct list_head poll_list; ++ long unsigned int state; ++ int weight; ++ long unsigned int gro_bitmask; ++ int (*poll)(struct napi_struct *, int); ++ int poll_owner; ++ struct net_device *dev; ++ struct gro_list gro_hash[8]; ++ struct sk_buff *skb; ++ struct hrtimer timer; ++ struct list_head dev_list; ++ struct hlist_node 
napi_hash_node; ++ unsigned int napi_id; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct netdev_queue { ++ struct net_device *dev; ++ struct Qdisc *qdisc; ++ struct Qdisc *qdisc_sleeping; ++ struct kobject kobj; ++ int numa_node; ++ long unsigned int tx_maxrate; ++ long unsigned int trans_timeout; ++ struct net_device *sb_dev; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t _xmit_lock; ++ int xmit_lock_owner; ++ long unsigned int trans_start; ++ long unsigned int state; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dql dql; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct qdisc_skb_head { ++ struct sk_buff *head; ++ struct sk_buff *tail; ++ union { ++ u32 qlen; ++ atomic_t atomic_qlen; ++ }; ++ spinlock_t lock; ++}; ++ ++struct gnet_stats_basic_packed { ++ __u64 bytes; ++ __u32 packets; ++} __attribute__((packed)); ++ ++struct gnet_stats_queue { ++ __u32 qlen; ++ __u32 backlog; ++ __u32 drops; ++ __u32 requeues; ++ __u32 overlimits; ++}; ++ ++struct Qdisc_ops; ++ ++struct qdisc_size_table; ++ ++struct net_rate_estimator; ++ ++struct gnet_stats_basic_cpu; ++ ++struct Qdisc { ++ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); ++ struct sk_buff * (*dequeue)(struct Qdisc *); ++ unsigned int flags; ++ u32 limit; ++ const struct Qdisc_ops *ops; ++ struct qdisc_size_table *stab; ++ struct hlist_node hash; ++ u32 handle; ++ u32 parent; ++ struct netdev_queue *dev_queue; ++ struct net_rate_estimator *rate_est; ++ struct gnet_stats_basic_cpu *cpu_bstats; ++ struct gnet_stats_queue *cpu_qstats; ++ int padded; ++ refcount_t refcnt; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sk_buff_head gso_skb; ++ struct qdisc_skb_head q; ++ struct gnet_stats_basic_packed bstats; ++ seqcount_t running; ++ struct gnet_stats_queue qstats; ++ long unsigned int state; ++ struct Qdisc *next_sched; ++ struct sk_buff_head skb_bad_txq; ++ spinlock_t busylock; ++ spinlock_t seqlock; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rps_map { ++ unsigned int len; ++ struct callback_head rcu; ++ u16 cpus[0]; ++}; ++ ++struct rps_dev_flow { ++ u16 cpu; ++ u16 filter; ++ unsigned int last_qtail; ++}; ++ ++struct rps_dev_flow_table { ++ unsigned int mask; ++ struct callback_head rcu; ++ struct rps_dev_flow flows[0]; ++}; ++ ++struct rps_sock_flow_table { ++ u32 mask; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ u32 ents[0]; ++}; ++ ++struct netdev_rx_queue { ++ struct rps_map *rps_map; ++ struct rps_dev_flow_table *rps_flow_table; ++ struct kobject kobj; ++ struct net_device *dev; ++ long: 64; ++ struct xdp_rxq_info xdp_rxq; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct xps_map { ++ 
unsigned int len; ++ unsigned int alloc_len; ++ struct callback_head rcu; ++ u16 queues[0]; ++}; ++ ++struct xps_dev_maps { ++ struct callback_head rcu; ++ struct xps_map *attr_map[0]; ++}; ++ ++struct netdev_fcoe_hbainfo { ++ char manufacturer[64]; ++ char serial_number[64]; ++ char hardware_version[64]; ++ char driver_version[64]; ++ char optionrom_version[64]; ++ char firmware_version[64]; ++ char model[256]; ++ char model_description[256]; ++}; ++ ++struct netdev_phys_item_id { ++ unsigned char id[32]; ++ unsigned char id_len; ++}; ++ ++typedef u16 (*select_queue_fallback_t)(struct net_device *, struct sk_buff *, struct net_device *); ++ ++enum tc_setup_type { ++ TC_SETUP_QDISC_MQPRIO = 0, ++ TC_SETUP_CLSU32 = 1, ++ TC_SETUP_CLSFLOWER = 2, ++ TC_SETUP_CLSMATCHALL = 3, ++ TC_SETUP_CLSBPF = 4, ++ TC_SETUP_BLOCK = 5, ++ TC_SETUP_QDISC_CBS = 6, ++ TC_SETUP_QDISC_RED = 7, ++ TC_SETUP_QDISC_PRIO = 8, ++ TC_SETUP_QDISC_MQ = 9, ++ TC_SETUP_QDISC_ETF = 10, ++}; ++ ++enum bpf_netdev_command { ++ XDP_SETUP_PROG = 0, ++ XDP_SETUP_PROG_HW = 1, ++ XDP_QUERY_PROG = 2, ++ XDP_QUERY_PROG_HW = 3, ++ BPF_OFFLOAD_VERIFIER_PREP = 4, ++ BPF_OFFLOAD_TRANSLATE = 5, ++ BPF_OFFLOAD_DESTROY = 6, ++ BPF_OFFLOAD_MAP_ALLOC = 7, ++ BPF_OFFLOAD_MAP_FREE = 8, ++ XDP_QUERY_XSK_UMEM = 9, ++ XDP_SETUP_XSK_UMEM = 10, ++}; ++ ++struct bpf_verifier_env; ++ ++struct bpf_prog_offload_ops { ++ int (*insn_hook)(struct bpf_verifier_env *, int, int); ++}; ++ ++struct bpf_offloaded_map; ++ ++struct xdp_umem; ++ ++struct netdev_bpf { ++ enum bpf_netdev_command command; ++ union { ++ struct { ++ u32 flags; ++ struct bpf_prog *prog; ++ struct netlink_ext_ack *extack; ++ }; ++ struct { ++ u32 prog_id; ++ u32 prog_flags; ++ }; ++ struct { ++ struct bpf_prog *prog; ++ const struct bpf_prog_offload_ops *ops; ++ } verifier; ++ struct { ++ struct bpf_prog *prog; ++ } offload; ++ struct { ++ struct bpf_offloaded_map *offmap; ++ }; ++ struct { ++ struct xdp_umem *umem; ++ u16 queue_id; ++ } xsk; ++ }; ++}; ++ ++struct xfrmdev_ops { ++ int (*xdo_dev_state_add)(struct xfrm_state *); ++ void (*xdo_dev_state_delete)(struct xfrm_state *); ++ void (*xdo_dev_state_free)(struct xfrm_state *); ++ bool (*xdo_dev_offload_ok)(struct sk_buff *, struct xfrm_state *); ++ void (*xdo_dev_state_advance_esn)(struct xfrm_state *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++enum tls_offload_ctx_dir { ++ TLS_OFFLOAD_CTX_DIR_RX = 0, ++ TLS_OFFLOAD_CTX_DIR_TX = 1, ++}; ++ ++struct tls_crypto_info; ++ ++struct tls_context; ++ ++struct tlsdev_ops { ++ int (*tls_dev_add)(struct net_device *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32); ++ void (*tls_dev_del)(struct net_device *, struct tls_context *, enum tls_offload_ctx_dir); ++ void (*tls_dev_resync_rx)(struct net_device *, struct sock *, u32, u64); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct dev_ifalias { ++ struct callback_head rcuhead; ++ char ifalias[0]; ++}; ++ ++struct udp_tunnel_info; ++ ++struct net_device_ops { ++ int (*ndo_init)(struct net_device *); ++ void (*ndo_uninit)(struct net_device *); ++ int (*ndo_open)(struct net_device 
*); ++ int (*ndo_stop)(struct net_device *); ++ netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); ++ netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); ++ u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *, select_queue_fallback_t); ++ void (*ndo_change_rx_flags)(struct net_device *, int); ++ void (*ndo_set_rx_mode)(struct net_device *); ++ int (*ndo_set_mac_address)(struct net_device *, void *); ++ int (*ndo_validate_addr)(struct net_device *); ++ int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); ++ int (*ndo_set_config)(struct net_device *, struct ifmap *); ++ int (*ndo_change_mtu)(struct net_device *, int); ++ int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); ++ void (*ndo_tx_timeout)(struct net_device *); ++ void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); ++ bool (*ndo_has_offload_stats)(const struct net_device *, int); ++ int (*ndo_get_offload_stats)(int, const struct net_device *, void *); ++ struct net_device_stats * (*ndo_get_stats)(struct net_device *); ++ int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); ++ int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); ++ void (*ndo_poll_controller)(struct net_device *); ++ int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); ++ void (*ndo_netpoll_cleanup)(struct net_device *); ++ int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); ++ int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); ++ int (*ndo_set_vf_rate)(struct net_device *, int, int, int); ++ int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); ++ int (*ndo_set_vf_trust)(struct net_device *, int, bool); ++ int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); ++ int (*ndo_set_vf_link_state)(struct net_device *, int, int); ++ int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); ++ int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); ++ int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); ++ int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); ++ int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); ++ int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); ++ int (*ndo_fcoe_enable)(struct net_device *); ++ int (*ndo_fcoe_disable)(struct net_device *); ++ int (*ndo_fcoe_ddp_setup)(struct net_device *, u16, struct scatterlist *, unsigned int); ++ int (*ndo_fcoe_ddp_done)(struct net_device *, u16); ++ int (*ndo_fcoe_ddp_target)(struct net_device *, u16, struct scatterlist *, unsigned int); ++ int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); ++ int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); ++ int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); ++ int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); ++ int (*ndo_del_slave)(struct net_device *, struct net_device *); ++ netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); ++ int (*ndo_set_features)(struct net_device *, netdev_features_t); ++ int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); ++ void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); ++ int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16); ++ int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, 
u16); ++ int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); ++ int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16); ++ int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); ++ int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); ++ int (*ndo_change_carrier)(struct net_device *, bool); ++ int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); ++ int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); ++ void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); ++ void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); ++ void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); ++ void (*ndo_dfwd_del_station)(struct net_device *, void *); ++ int (*ndo_get_lock_subclass)(struct net_device *); ++ int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); ++ int (*ndo_get_iflink)(const struct net_device *); ++ int (*ndo_change_proto_down)(struct net_device *, bool); ++ int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); ++ void (*ndo_set_rx_headroom)(struct net_device *, int); ++ int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); ++ int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); ++ int (*ndo_xsk_async_xmit)(struct net_device *, u32); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long unsigned int kabi_reserved31; ++ long unsigned int kabi_reserved32; ++ long unsigned int kabi_reserved33; ++ long unsigned int kabi_reserved34; ++ long unsigned int kabi_reserved35; ++ long unsigned int kabi_reserved36; ++ long unsigned int kabi_reserved37; ++ long unsigned int kabi_reserved38; ++ long unsigned int kabi_reserved39; ++ long unsigned int kabi_reserved40; ++ long unsigned int kabi_reserved41; ++ long unsigned int kabi_reserved42; ++ long unsigned int kabi_reserved43; ++ long unsigned int kabi_reserved44; ++ long unsigned int kabi_reserved45; ++ long unsigned int kabi_reserved46; ++ long unsigned int kabi_reserved47; ++}; ++ ++struct neigh_parms { ++ possible_net_t net; ++ struct net_device *dev; ++ struct list_head list; ++ int (*neigh_setup)(struct neighbour *); ++ void (*neigh_cleanup)(struct neighbour *); ++ struct neigh_table *tbl; ++ void *sysctl_table; ++ int dead; ++ refcount_t refcnt; ++ struct callback_head callback_head; ++ int reachable_time; ++ int 
data[13]; ++ long unsigned int data_state[1]; ++}; ++ ++struct pcpu_sw_netstats { ++ u64 rx_packets; ++ u64 rx_bytes; ++ u64 tx_packets; ++ u64 tx_bytes; ++ struct u64_stats_sync syncp; ++}; ++ ++struct switchdev_attr; ++ ++struct switchdev_trans; ++ ++struct switchdev_obj; ++ ++struct switchdev_ops { ++ int (*switchdev_port_attr_get)(struct net_device *, struct switchdev_attr *); ++ int (*switchdev_port_attr_set)(struct net_device *, const struct switchdev_attr *, struct switchdev_trans *); ++ int (*switchdev_port_obj_add)(struct net_device *, const struct switchdev_obj *, struct switchdev_trans *); ++ int (*switchdev_port_obj_del)(struct net_device *, const struct switchdev_obj *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct l3mdev_ops { ++ u32 (*l3mdev_fib_table)(const struct net_device *); ++ struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16); ++ struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16); ++ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct nd_opt_hdr; ++ ++struct ndisc_options; ++ ++struct prefix_info; ++ ++struct ndisc_ops { ++ int (*is_useropt)(u8); ++ int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); ++ void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); ++ int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); ++ void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); ++ void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); ++}; ++ ++struct ipv6_devstat { ++ struct proc_dir_entry *proc_dir_entry; ++ struct ipstats_mib *ipv6; ++ struct icmpv6_mib_device *icmpv6dev; ++ struct icmpv6msg_mib_device *icmpv6msgdev; ++}; ++ ++struct ifmcaddr6; ++ ++struct ifacaddr6; ++ ++struct inet6_dev { ++ struct net_device *dev; ++ struct list_head addr_list; ++ struct ifmcaddr6 *mc_list; ++ struct ifmcaddr6 *mc_tomb; ++ spinlock_t mc_lock; ++ unsigned char mc_qrv; ++ unsigned char mc_gq_running; ++ unsigned char mc_ifc_count; ++ unsigned char mc_dad_count; ++ long unsigned int mc_v1_seen; ++ long unsigned int mc_qi; ++ long unsigned int mc_qri; ++ long unsigned int mc_maxdelay; ++ struct timer_list mc_gq_timer; ++ struct timer_list mc_ifc_timer; ++ struct timer_list mc_dad_timer; ++ struct ifacaddr6 *ac_list; ++ rwlock_t lock; ++ refcount_t refcnt; ++ __u32 if_flags; ++ int dead; ++ u32 desync_factor; ++ u8 rndid[8]; ++ struct list_head tempaddr_list; ++ struct in6_addr token; ++ struct neigh_parms *nd_parms; ++ struct ipv6_devconf cnf; ++ struct ipv6_devstat stats; ++ struct timer_list rs_timer; ++ __s32 rs_interval; ++ __u8 rs_probes; ++ long unsigned int tstamp; ++ struct callback_head rcu; ++}; ++ ++struct tcf_proto; ++ ++struct mini_Qdisc { ++ struct tcf_proto *filter_list; ++ struct gnet_stats_basic_cpu 
*cpu_bstats; ++ struct gnet_stats_queue *cpu_qstats; ++ struct callback_head rcu; ++}; ++ ++struct rtnl_link_ops { ++ struct list_head list; ++ const char *kind; ++ size_t priv_size; ++ void (*setup)(struct net_device *); ++ unsigned int maxtype; ++ const struct nla_policy *policy; ++ int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ void (*dellink)(struct net_device *, struct list_head *); ++ size_t (*get_size)(const struct net_device *); ++ int (*fill_info)(struct sk_buff *, const struct net_device *); ++ size_t (*get_xstats_size)(const struct net_device *); ++ int (*fill_xstats)(struct sk_buff *, const struct net_device *); ++ unsigned int (*get_num_tx_queues)(); ++ unsigned int (*get_num_rx_queues)(); ++ unsigned int slave_maxtype; ++ const struct nla_policy *slave_policy; ++ int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ size_t (*get_slave_size)(const struct net_device *, const struct net_device *); ++ int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); ++ struct net * (*get_link_net)(const struct net_device *); ++ size_t (*get_linkxstats_size)(const struct net_device *, int); ++ int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++}; ++ ++struct sd_flow_limit { ++ u64 count; ++ unsigned int num_buckets; ++ unsigned int history_head; ++ u16 history[128]; ++ u8 buckets[0]; ++}; ++ ++struct softnet_data { ++ struct list_head poll_list; ++ struct sk_buff_head process_queue; ++ unsigned int processed; ++ unsigned int time_squeeze; ++ unsigned int received_rps; ++ struct softnet_data *rps_ipi_list; ++ struct sd_flow_limit *flow_limit; ++ struct Qdisc *output_queue; ++ struct Qdisc **output_queue_tailp; ++ struct sk_buff *completion_queue; ++ struct sk_buff_head xfrm_backlog; ++ struct { ++ u16 recursion; ++ u8 more; ++ } xmit; ++ int: 32; ++ unsigned int input_queue_head; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ call_single_data_t csd; ++ struct softnet_data *rps_ipi_next; ++ unsigned int cpu; ++ unsigned int input_queue_tail; ++ unsigned int dropped; ++ struct sk_buff_head input_pkt_queue; ++ struct napi_struct backlog; ++ long: 64; ++}; ++ ++struct nf_hook_state { ++ unsigned int hook; ++ u_int8_t pf; ++ struct net_device *in; ++ struct net_device *out; ++ struct sock *sk; ++ struct net *net; ++ int (*okfn)(struct net *, struct sock *, struct sk_buff *); ++}; ++ ++enum nf_nat_manip_type; ++ ++struct nf_conn; ++ ++struct nf_nat_hook { ++ int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); ++ void (*decode_session)(struct sk_buff *, 
struct flowi *); ++ unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn *, enum nf_nat_manip_type, enum ip_conntrack_dir); ++}; ++ ++struct nf_conntrack_zone { ++ u16 id; ++ u8 flags; ++ u8 dir; ++}; ++ ++struct nf_conntrack_man { ++ union nf_inet_addr u3; ++ union nf_conntrack_man_proto u; ++ u_int16_t l3num; ++}; ++ ++struct nf_conntrack_tuple { ++ struct nf_conntrack_man src; ++ struct { ++ union nf_inet_addr u3; ++ union { ++ __be16 all; ++ struct { ++ __be16 port; ++ } tcp; ++ struct { ++ __be16 port; ++ } udp; ++ struct { ++ u_int8_t type; ++ u_int8_t code; ++ } icmp; ++ struct { ++ __be16 port; ++ } dccp; ++ struct { ++ __be16 port; ++ } sctp; ++ struct { ++ __be16 key; ++ } gre; ++ } u; ++ u_int8_t protonum; ++ u_int8_t dir; ++ } dst; ++}; ++ ++struct nf_conntrack_tuple_hash { ++ struct hlist_nulls_node hnnode; ++ struct nf_conntrack_tuple tuple; ++}; ++ ++struct nf_ct_gre { ++ unsigned int stream_timeout; ++ unsigned int timeout; ++}; ++ ++union nf_conntrack_proto { ++ struct nf_ct_dccp dccp; ++ struct ip_ct_sctp sctp; ++ struct ip_ct_tcp tcp; ++ struct nf_ct_gre gre; ++ unsigned int tmpl_padto; ++}; ++ ++struct nf_ct_ext; ++ ++struct nf_conn { ++ struct nf_conntrack ct_general; ++ spinlock_t lock; ++ u16 cpu; ++ struct nf_conntrack_zone zone; ++ struct nf_conntrack_tuple_hash tuplehash[2]; ++ long unsigned int status; ++ u32 timeout; ++ possible_net_t ct_net; ++ struct hlist_node nat_bysource; ++ u8 __nfct_init_offset[0]; ++ struct nf_conn *master; ++ u_int32_t mark; ++ u_int32_t secmark; ++ struct nf_ct_ext *ext; ++ union nf_conntrack_proto proto; ++}; ++ ++struct nf_ct_hook { ++ int (*update)(struct net *, struct sk_buff *); ++ void (*destroy)(struct nf_conntrack *); ++ bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); ++}; ++ ++struct nfnl_ct_hook { ++ struct nf_conn * (*get_ct)(const struct sk_buff *, enum ip_conntrack_info *); ++ size_t (*build_size)(const struct nf_conn *); ++ int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); ++ int (*parse)(const struct nlattr *, struct nf_conn *); ++ int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); ++ void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); ++}; ++ ++struct iphdr { ++ __u8 ihl: 4; ++ __u8 version: 4; ++ __u8 tos; ++ __be16 tot_len; ++ __be16 id; ++ __be16 frag_off; ++ __u8 ttl; ++ __u8 protocol; ++ __sum16 check; ++ __be32 saddr; ++ __be32 daddr; ++}; ++ ++struct ipv6_params { ++ __s32 disable_ipv6; ++ __s32 autoconf; ++}; ++ ++struct bdi_writeback_congested { ++ long unsigned int state; ++ refcount_t refcnt; ++ struct backing_dev_info *__bdi; ++ int blkcg_id; ++ struct rb_node rb_node; ++}; ++ ++struct bio_integrity_payload { ++ struct bio *bip_bio; ++ struct bvec_iter bip_iter; ++ short unsigned int bip_slab; ++ short unsigned int bip_vcnt; ++ short unsigned int bip_max_vcnt; ++ short unsigned int bip_flags; ++ struct work_struct bip_work; ++ struct bio_vec *bip_vec; ++ struct bio_vec bip_inline_vecs[0]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++typedef void *mempool_alloc_t(gfp_t, void *); ++ ++typedef void mempool_free_t(void *, void *); ++ ++struct mempool_s { ++ spinlock_t lock; ++ int min_nr; ++ int curr_nr; ++ void **elements; ++ void *pool_data; ++ mempool_alloc_t *alloc; ++ mempool_free_t *free; ++ wait_queue_head_t wait; ++}; ++ ++typedef struct mempool_s mempool_t; ++ ++struct bio_set { ++ struct kmem_cache *bio_slab; ++ unsigned int 
front_pad; ++ mempool_t bio_pool; ++ mempool_t bvec_pool; ++ mempool_t bio_integrity_pool; ++ mempool_t bvec_integrity_pool; ++ spinlock_t rescue_lock; ++ struct bio_list rescue_list; ++ struct work_struct rescue_work; ++ struct workqueue_struct *rescue_workqueue; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++enum kgdb_bptype { ++ BP_BREAKPOINT = 0, ++ BP_HARDWARE_BREAKPOINT = 1, ++ BP_WRITE_WATCHPOINT = 2, ++ BP_READ_WATCHPOINT = 3, ++ BP_ACCESS_WATCHPOINT = 4, ++ BP_POKE_BREAKPOINT = 5, ++}; ++ ++struct dbg_reg_def_t { ++ char *name; ++ int size; ++ int offset; ++}; ++ ++struct kgdb_arch { ++ unsigned char gdb_bpt_instr[4]; ++ long unsigned int flags; ++ int (*set_breakpoint)(long unsigned int, char *); ++ int (*remove_breakpoint)(long unsigned int, char *); ++ int (*set_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); ++ int (*remove_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); ++ void (*disable_hw_break)(struct pt_regs *); ++ void (*remove_all_hw_break)(); ++ void (*correct_hw_break)(); ++ void (*enable_nmi)(bool); ++}; ++ ++struct kgdb_io { ++ const char *name; ++ int (*read_char)(); ++ void (*write_char)(u8); ++ void (*flush)(); ++ int (*init)(); ++ void (*pre_exception)(); ++ void (*post_exception)(); ++ int is_console; ++}; ++ ++struct mem_cgroup_stat_cpu { ++ long int count[34]; ++ long unsigned int events[81]; ++ long unsigned int nr_page_events; ++ long unsigned int targets[3]; ++}; ++ ++struct mem_cgroup_reclaim_iter { ++ struct mem_cgroup *position; ++ unsigned int generation; ++}; ++ ++struct lruvec_stat { ++ long int count[28]; ++}; ++ ++struct memcg_shrinker_map { ++ struct callback_head rcu; ++ long unsigned int map[0]; ++}; ++ ++struct mem_cgroup_per_node { ++ struct lruvec lruvec; ++ struct lruvec_stat *lruvec_stat_cpu; ++ atomic_long_t lruvec_stat[28]; ++ long unsigned int lru_zone_size[15]; ++ struct mem_cgroup_reclaim_iter iter[13]; ++ struct memcg_shrinker_map *shrinker_map; ++ struct rb_node tree_node; ++ long unsigned int usage_in_excess; ++ bool on_tree; ++ bool congested; ++ struct mem_cgroup *memcg; ++}; ++ ++struct eventfd_ctx; ++ ++struct mem_cgroup_threshold { ++ struct eventfd_ctx *eventfd; ++ long unsigned int threshold; ++}; ++ ++struct mem_cgroup_threshold_ary { ++ int current_threshold; ++ unsigned int size; ++ struct mem_cgroup_threshold entries[0]; ++}; ++ ++enum { ++ RTAX_UNSPEC = 0, ++ RTAX_LOCK = 1, ++ RTAX_MTU = 2, ++ RTAX_WINDOW = 3, ++ RTAX_RTT = 4, ++ RTAX_RTTVAR = 5, ++ RTAX_SSTHRESH = 6, ++ RTAX_CWND = 7, ++ RTAX_ADVMSS = 8, ++ RTAX_REORDERING = 9, ++ RTAX_HOPLIMIT = 10, ++ RTAX_INITCWND = 11, ++ RTAX_FEATURES = 12, ++ RTAX_RTO_MIN = 13, ++ RTAX_INITRWND = 14, ++ RTAX_QUICKACK = 15, ++ RTAX_CC_ALGO = 16, ++ RTAX_FASTOPEN_NO_COOKIE = 17, ++ __RTAX_MAX = 18, ++}; ++ ++struct tcmsg { ++ unsigned char tcm_family; ++ unsigned char tcm__pad1; ++ short unsigned int tcm__pad2; ++ int tcm_ifindex; ++ __u32 tcm_handle; ++ __u32 tcm_parent; ++ __u32 tcm_info; ++}; ++ ++struct gnet_stats_basic_cpu { ++ struct gnet_stats_basic_packed bstats; ++ struct u64_stats_sync syncp; ++} __attribute__((packed)); ++ ++struct gnet_dump { ++ spinlock_t *lock; ++ struct sk_buff *skb; ++ struct nlattr *tail; ++ int compat_tc_stats; ++ int compat_xstats; ++ int padattr; ++ void *xstats; ++ int xstats_len; ++ struct tc_stats tc_stats; ++}; ++ ++struct nla_policy { ++ u16 type; ++ u16 len; ++ void *validation_data; ++}; ++ ++typedef int 
tc_setup_cb_t(enum tc_setup_type, void *, void *); ++ ++struct qdisc_size_table { ++ struct callback_head rcu; ++ struct list_head list; ++ struct tc_sizespec szopts; ++ int refcnt; ++ u16 data[0]; ++}; ++ ++struct Qdisc_class_ops; ++ ++struct Qdisc_ops { ++ struct Qdisc_ops *next; ++ const struct Qdisc_class_ops *cl_ops; ++ char id[16]; ++ int priv_size; ++ unsigned int static_flags; ++ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); ++ struct sk_buff * (*dequeue)(struct Qdisc *); ++ struct sk_buff * (*peek)(struct Qdisc *); ++ int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); ++ void (*reset)(struct Qdisc *); ++ void (*destroy)(struct Qdisc *); ++ int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); ++ void (*attach)(struct Qdisc *); ++ int (*change_tx_queue_len)(struct Qdisc *, unsigned int); ++ int (*dump)(struct Qdisc *, struct sk_buff *); ++ int (*dump_stats)(struct Qdisc *, struct gnet_dump *); ++ void (*ingress_block_set)(struct Qdisc *, u32); ++ void (*egress_block_set)(struct Qdisc *, u32); ++ u32 (*ingress_block_get)(struct Qdisc *); ++ u32 (*egress_block_get)(struct Qdisc *); ++ struct module *owner; ++}; ++ ++struct qdisc_walker; ++ ++struct tcf_block; ++ ++struct Qdisc_class_ops { ++ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); ++ int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); ++ struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); ++ void (*qlen_notify)(struct Qdisc *, long unsigned int); ++ long unsigned int (*find)(struct Qdisc *, u32); ++ int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); ++ int (*delete)(struct Qdisc *, long unsigned int); ++ void (*walk)(struct Qdisc *, struct qdisc_walker *); ++ struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); ++ long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); ++ void (*unbind_tcf)(struct Qdisc *, long unsigned int); ++ int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); ++ int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); ++}; ++ ++struct tcf_chain; ++ ++struct tcf_block { ++ struct list_head chain_list; ++ u32 index; ++ unsigned int refcnt; ++ struct net *net; ++ struct Qdisc *q; ++ struct list_head cb_list; ++ struct list_head owner_list; ++ bool keep_dst; ++ unsigned int offloadcnt; ++ unsigned int nooffloaddevcnt; ++ struct { ++ struct tcf_chain *chain; ++ struct list_head filter_chain_list; ++ } chain0; ++}; ++ ++struct tcf_result; ++ ++struct tcf_proto_ops; ++ ++struct tcf_proto { ++ struct tcf_proto *next; ++ void *root; ++ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); ++ __be16 protocol; ++ u32 prio; ++ void *data; ++ const struct tcf_proto_ops *ops; ++ struct tcf_chain *chain; ++ struct callback_head rcu; ++}; ++ ++struct tcf_result { ++ union { ++ struct { ++ long unsigned int class; ++ u32 classid; ++ }; ++ const struct tcf_proto *goto_tp; ++ struct { ++ bool ingress; ++ struct gnet_stats_queue *qstats; ++ }; ++ }; ++}; ++ ++struct tcf_walker; ++ ++struct tcf_proto_ops { ++ struct list_head head; ++ char kind[16]; ++ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); ++ int (*init)(struct tcf_proto *); ++ void (*destroy)(struct tcf_proto *, struct netlink_ext_ack *); ++ void * (*get)(struct tcf_proto *, u32); ++ int 
(*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, bool, struct netlink_ext_ack *); ++ int (*delete)(struct tcf_proto *, void *, bool *, struct netlink_ext_ack *); ++ void (*walk)(struct tcf_proto *, struct tcf_walker *); ++ int (*reoffload)(struct tcf_proto *, bool, tc_setup_cb_t *, void *, struct netlink_ext_ack *); ++ void (*bind_class)(void *, u32, long unsigned int); ++ void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); ++ void (*tmplt_destroy)(void *); ++ int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *); ++ int (*tmplt_dump)(struct sk_buff *, struct net *, void *); ++ struct module *owner; ++}; ++ ++struct tcf_chain { ++ struct tcf_proto *filter_chain; ++ struct list_head list; ++ struct tcf_block *block; ++ u32 index; ++ unsigned int refcnt; ++ unsigned int action_refcnt; ++ bool explicitly_created; ++ const struct tcf_proto_ops *tmplt_ops; ++ void *tmplt_priv; ++}; ++ ++struct sock_fprog_kern { ++ u16 len; ++ struct sock_filter *filter; ++}; ++ ++struct sk_filter { ++ refcount_t refcnt; ++ struct callback_head rcu; ++ struct bpf_prog *prog; ++}; ++ ++struct bpf_map; ++ ++struct bpf_redirect_info { ++ u32 ifindex; ++ u32 flags; ++ struct bpf_map *map; ++ struct bpf_map *map_to_flush; ++ u32 kern_flags; ++}; ++ ++enum { ++ NEIGH_VAR_MCAST_PROBES = 0, ++ NEIGH_VAR_UCAST_PROBES = 1, ++ NEIGH_VAR_APP_PROBES = 2, ++ NEIGH_VAR_MCAST_REPROBES = 3, ++ NEIGH_VAR_RETRANS_TIME = 4, ++ NEIGH_VAR_BASE_REACHABLE_TIME = 5, ++ NEIGH_VAR_DELAY_PROBE_TIME = 6, ++ NEIGH_VAR_GC_STALETIME = 7, ++ NEIGH_VAR_QUEUE_LEN_BYTES = 8, ++ NEIGH_VAR_PROXY_QLEN = 9, ++ NEIGH_VAR_ANYCAST_DELAY = 10, ++ NEIGH_VAR_PROXY_DELAY = 11, ++ NEIGH_VAR_LOCKTIME = 12, ++ NEIGH_VAR_QUEUE_LEN = 13, ++ NEIGH_VAR_RETRANS_TIME_MS = 14, ++ NEIGH_VAR_BASE_REACHABLE_TIME_MS = 15, ++ NEIGH_VAR_GC_INTERVAL = 16, ++ NEIGH_VAR_GC_THRESH1 = 17, ++ NEIGH_VAR_GC_THRESH2 = 18, ++ NEIGH_VAR_GC_THRESH3 = 19, ++ NEIGH_VAR_MAX = 20, ++}; ++ ++struct pneigh_entry; ++ ++struct neigh_statistics; ++ ++struct neigh_hash_table; ++ ++struct neigh_table { ++ int family; ++ unsigned int entry_size; ++ unsigned int key_len; ++ __be16 protocol; ++ __u32 (*hash)(const void *, const struct net_device *, __u32 *); ++ bool (*key_eq)(const struct neighbour *, const void *); ++ int (*constructor)(struct neighbour *); ++ int (*pconstructor)(struct pneigh_entry *); ++ void (*pdestructor)(struct pneigh_entry *); ++ void (*proxy_redo)(struct sk_buff *); ++ char *id; ++ struct neigh_parms parms; ++ struct list_head parms_list; ++ int gc_interval; ++ int gc_thresh1; ++ int gc_thresh2; ++ int gc_thresh3; ++ long unsigned int last_flush; ++ struct delayed_work gc_work; ++ struct timer_list proxy_timer; ++ struct sk_buff_head proxy_queue; ++ atomic_t entries; ++ rwlock_t lock; ++ long unsigned int last_rand; ++ struct neigh_statistics *stats; ++ struct neigh_hash_table *nht; ++ struct pneigh_entry **phash_buckets; ++}; ++ ++struct neigh_statistics { ++ long unsigned int allocs; ++ long unsigned int destroys; ++ long unsigned int hash_grows; ++ long unsigned int res_failed; ++ long unsigned int lookups; ++ long unsigned int hits; ++ long unsigned int rcv_probes_mcast; ++ long unsigned int rcv_probes_ucast; ++ long unsigned int periodic_gc_runs; ++ long unsigned int forced_gc_runs; ++ long unsigned int unres_discards; ++ long unsigned int table_fulls; ++}; ++ ++struct neigh_ops { ++ int family; ++ void (*solicit)(struct 
neighbour *, struct sk_buff *); ++ void (*error_report)(struct neighbour *, struct sk_buff *); ++ int (*output)(struct neighbour *, struct sk_buff *); ++ int (*connected_output)(struct neighbour *, struct sk_buff *); ++}; ++ ++struct pneigh_entry { ++ struct pneigh_entry *next; ++ possible_net_t net; ++ struct net_device *dev; ++ u8 flags; ++ u8 key[0]; ++}; ++ ++struct neigh_hash_table { ++ struct neighbour **hash_buckets; ++ unsigned int hash_shift; ++ __u32 hash_rnd[4]; ++ struct callback_head rcu; ++}; ++ ++struct lwtunnel_state { ++ __u16 type; ++ __u16 flags; ++ __u16 headroom; ++ atomic_t refcnt; ++ int (*orig_output)(struct net *, struct sock *, struct sk_buff *); ++ int (*orig_input)(struct sk_buff *); ++ struct callback_head rcu; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ __u8 data[0]; ++}; ++ ++struct dst_metrics { ++ u32 metrics[17]; ++ refcount_t refcnt; ++}; ++ ++enum { ++ TCP_ESTABLISHED = 1, ++ TCP_SYN_SENT = 2, ++ TCP_SYN_RECV = 3, ++ TCP_FIN_WAIT1 = 4, ++ TCP_FIN_WAIT2 = 5, ++ TCP_TIME_WAIT = 6, ++ TCP_CLOSE = 7, ++ TCP_CLOSE_WAIT = 8, ++ TCP_LAST_ACK = 9, ++ TCP_LISTEN = 10, ++ TCP_CLOSING = 11, ++ TCP_NEW_SYN_RECV = 12, ++ TCP_MAX_STATES = 13, ++}; ++ ++struct smc_hashinfo { ++ rwlock_t lock; ++ struct hlist_head ht; ++}; ++ ++struct fib_rule_hdr { ++ __u8 family; ++ __u8 dst_len; ++ __u8 src_len; ++ __u8 tos; ++ __u8 table; ++ __u8 res1; ++ __u8 res2; ++ __u8 action; ++ __u32 flags; ++}; ++ ++struct fib_rule_port_range { ++ __u16 start; ++ __u16 end; ++}; ++ ++struct fib_kuid_range { ++ kuid_t start; ++ kuid_t end; ++}; ++ ++struct fib_rule { ++ struct list_head list; ++ int iifindex; ++ int oifindex; ++ u32 mark; ++ u32 mark_mask; ++ u32 flags; ++ u32 table; ++ u8 action; ++ u8 l3mdev; ++ u8 proto; ++ u8 ip_proto; ++ u32 target; ++ __be64 tun_id; ++ struct fib_rule *ctarget; ++ struct net *fr_net; ++ refcount_t refcnt; ++ u32 pref; ++ int suppress_ifgroup; ++ int suppress_prefixlen; ++ char iifname[16]; ++ char oifname[16]; ++ struct fib_kuid_range uid_range; ++ struct fib_rule_port_range sport_range; ++ struct fib_rule_port_range dport_range; ++ struct callback_head rcu; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct fib_lookup_arg { ++ void *lookup_ptr; ++ const void *lookup_data; ++ void *result; ++ struct fib_rule *rule; ++ u32 table; ++ int flags; ++}; ++ ++struct request_sock_ops; ++ ++struct timewait_sock_ops; ++ ++struct udp_table; ++ ++struct raw_hashinfo; ++ ++struct proto { ++ void (*close)(struct sock *, long int); ++ int (*pre_connect)(struct sock *, struct sockaddr *, int); ++ int (*connect)(struct sock *, struct sockaddr *, int); ++ int (*disconnect)(struct sock *, int); ++ struct sock * (*accept)(struct sock *, int, int *, bool); ++ int (*ioctl)(struct sock *, int, long unsigned int); ++ int (*init)(struct sock *); ++ void (*destroy)(struct sock *); ++ void (*shutdown)(struct sock *, int); ++ int (*setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*getsockopt)(struct sock *, int, int, char *, int *); ++ void (*keepalive)(struct sock *, int); ++ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*compat_getsockopt)(struct sock *, int, int, 
char *, int *); ++ int (*compat_ioctl)(struct sock *, unsigned int, long unsigned int); ++ int (*sendmsg)(struct sock *, struct msghdr *, size_t); ++ int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int, int *); ++ int (*sendpage)(struct sock *, struct page *, int, size_t, int); ++ int (*bind)(struct sock *, struct sockaddr *, int); ++ int (*backlog_rcv)(struct sock *, struct sk_buff *); ++ void (*release_cb)(struct sock *); ++ int (*hash)(struct sock *); ++ void (*unhash)(struct sock *); ++ void (*rehash)(struct sock *); ++ int (*get_port)(struct sock *, short unsigned int); ++ unsigned int inuse_idx; ++ bool (*stream_memory_free)(const struct sock *); ++ bool (*stream_memory_read)(const struct sock *); ++ void (*enter_memory_pressure)(struct sock *); ++ void (*leave_memory_pressure)(struct sock *); ++ atomic_long_t *memory_allocated; ++ struct percpu_counter *sockets_allocated; ++ long unsigned int *memory_pressure; ++ long int *sysctl_mem; ++ int *sysctl_wmem; ++ int *sysctl_rmem; ++ u32 sysctl_wmem_offset; ++ u32 sysctl_rmem_offset; ++ int max_header; ++ bool no_autobind; ++ struct kmem_cache *slab; ++ unsigned int obj_size; ++ slab_flags_t slab_flags; ++ unsigned int useroffset; ++ unsigned int usersize; ++ struct percpu_counter *orphan_count; ++ struct request_sock_ops *rsk_prot; ++ struct timewait_sock_ops *twsk_prot; ++ union { ++ struct inet_hashinfo *hashinfo; ++ struct udp_table *udp_table; ++ struct raw_hashinfo *raw_hash; ++ struct smc_hashinfo *smc_hash; ++ } h; ++ struct module *owner; ++ char name[32]; ++ struct list_head node; ++ int (*diag_destroy)(struct sock *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++}; ++ ++struct request_sock; ++ ++struct request_sock_ops { ++ int family; ++ unsigned int obj_size; ++ struct kmem_cache *slab; ++ char *slab_name; ++ int (*rtx_syn_ack)(const struct sock *, struct request_sock *); ++ void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); ++ void (*send_reset)(const struct sock *, struct sk_buff *); ++ void (*destructor)(struct request_sock *); ++ void (*syn_ack_timeout)(const struct request_sock *); ++}; ++ ++struct timewait_sock_ops { ++ struct kmem_cache *twsk_slab; ++ char *twsk_slab_name; ++ unsigned int twsk_obj_size; ++ int (*twsk_unique)(struct sock *, struct sock *, void *); ++ void (*twsk_destructor)(struct sock *); ++}; ++ ++struct request_sock { ++ struct sock_common __req_common; ++ struct request_sock *dl_next; ++ u16 mss; ++ u8 num_retrans; ++ u8 cookie_ts: 1; ++ u8 num_timeout: 7; ++ u32 ts_recent; ++ struct timer_list rsk_timer; ++ const struct request_sock_ops *rsk_ops; ++ struct sock *sk; ++ u32 *saved_syn; ++ u32 secid; ++ u32 peer_secid; ++}; ++ ++enum tsq_enum { ++ TSQ_THROTTLED = 0, ++ TSQ_QUEUED = 1, ++ TCP_TSQ_DEFERRED = 2, ++ TCP_WRITE_TIMER_DEFERRED = 3, ++ TCP_DELACK_TIMER_DEFERRED = 4, ++ TCP_MTU_REDUCED_DEFERRED = 5, ++}; ++ ++struct ip6_sf_list { ++ struct ip6_sf_list *sf_next; ++ struct in6_addr sf_addr; ++ long 
unsigned int sf_count[2]; ++ unsigned char sf_gsresp; ++ unsigned char sf_oldin; ++ unsigned char sf_crcount; ++}; ++ ++struct ifmcaddr6 { ++ struct in6_addr mca_addr; ++ struct inet6_dev *idev; ++ struct ifmcaddr6 *next; ++ struct ip6_sf_list *mca_sources; ++ struct ip6_sf_list *mca_tomb; ++ unsigned int mca_sfmode; ++ unsigned char mca_crcount; ++ long unsigned int mca_sfcount[2]; ++ struct timer_list mca_timer; ++ unsigned int mca_flags; ++ int mca_users; ++ refcount_t mca_refcnt; ++ spinlock_t mca_lock; ++ long unsigned int mca_cstamp; ++ long unsigned int mca_tstamp; ++}; ++ ++struct ifacaddr6 { ++ struct in6_addr aca_addr; ++ struct fib6_info *aca_rt; ++ struct ifacaddr6 *aca_next; ++ int aca_users; ++ refcount_t aca_refcnt; ++ long unsigned int aca_cstamp; ++ long unsigned int aca_tstamp; ++}; ++ ++enum { ++ __ND_OPT_PREFIX_INFO_END = 0, ++ ND_OPT_SOURCE_LL_ADDR = 1, ++ ND_OPT_TARGET_LL_ADDR = 2, ++ ND_OPT_PREFIX_INFO = 3, ++ ND_OPT_REDIRECT_HDR = 4, ++ ND_OPT_MTU = 5, ++ ND_OPT_NONCE = 14, ++ __ND_OPT_ARRAY_MAX = 15, ++ ND_OPT_ROUTE_INFO = 24, ++ ND_OPT_RDNSS = 25, ++ ND_OPT_DNSSL = 31, ++ ND_OPT_6CO = 34, ++ __ND_OPT_MAX = 35, ++}; ++ ++struct nd_opt_hdr { ++ __u8 nd_opt_type; ++ __u8 nd_opt_len; ++}; ++ ++struct ndisc_options { ++ struct nd_opt_hdr *nd_opt_array[15]; ++ struct nd_opt_hdr *nd_opts_ri; ++ struct nd_opt_hdr *nd_opts_ri_end; ++ struct nd_opt_hdr *nd_useropts; ++ struct nd_opt_hdr *nd_useropts_end; ++}; ++ ++struct prefix_info { ++ __u8 type; ++ __u8 length; ++ __u8 prefix_len; ++ __u8 reserved: 6; ++ __u8 autoconf: 1; ++ __u8 onlink: 1; ++ __be32 valid; ++ __be32 prefered; ++ __be32 reserved2; ++ struct in6_addr prefix; ++}; ++ ++struct ip6_ra_chain { ++ struct ip6_ra_chain *next; ++ struct sock *sk; ++ int sel; ++ void (*destructor)(struct sock *); ++}; ++ ++struct nf_ipv6_ops { ++ int (*chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); ++ void (*route_input)(struct sk_buff *); ++ int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); ++ int (*route)(struct net *, struct dst_entry **, struct flowi *, bool); ++ int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); ++}; ++ ++struct nf_queue_entry { ++ struct list_head list; ++ struct sk_buff *skb; ++ unsigned int id; ++ unsigned int hook_index; ++ struct nf_hook_state state; ++ u16 size; ++}; ++ ++struct xt_table_info { ++ unsigned int size; ++ unsigned int number; ++ unsigned int initial_entries; ++ unsigned int hook_entry[5]; ++ unsigned int underflow[5]; ++ unsigned int stacksize; ++ void ***jumpstack; ++ unsigned char entries[0]; ++}; ++ ++struct ip_rt_acct { ++ __u32 o_bytes; ++ __u32 o_packets; ++ __u32 i_bytes; ++ __u32 i_packets; ++}; ++ ++struct rt6_exception_bucket { ++ struct hlist_head chain; ++ int depth; ++}; ++ ++struct ipv6_stub { ++ int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); ++ int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); ++ struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); ++ struct fib6_table * (*fib6_get_table)(struct net *, u32); ++ struct fib6_info * (*fib6_lookup)(struct net *, int, struct flowi6 *, int); ++ struct fib6_info * (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, int); ++ struct fib6_info * (*fib6_multipath_select)(const struct net *, struct fib6_info *, struct flowi6 *, int, const struct sk_buff *, int); ++ u32 
(*ip6_mtu_from_fib6)(struct fib6_info *, struct in6_addr *, struct in6_addr *); ++ void (*udpv6_encap_enable)(); ++ void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); ++ struct neigh_table *nd_tbl; ++}; ++ ++struct ipv6_bpf_stub { ++ int (*inet6_bind)(struct sock *, struct sockaddr *, int, bool, bool); ++}; ++ ++struct ip_tunnel_encap { ++ u16 type; ++ u16 flags; ++ __be16 sport; ++ __be16 dport; ++}; ++ ++struct ip_tunnel_encap_ops { ++ size_t (*encap_hlen)(struct ip_tunnel_encap *); ++ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); ++}; ++ ++enum grep_conntrack { ++ GRE_CT_UNREPLIED = 0, ++ GRE_CT_REPLIED = 1, ++ GRE_CT_MAX = 2, ++}; ++ ++struct nf_conntrack_l4proto; ++ ++struct nf_conntrack_l3proto; ++ ++struct ip_vs_iphdr { ++ int hdr_flags; ++ __u32 off; ++ __u32 len; ++ __u16 fragoffs; ++ __s16 protocol; ++ __s32 flags; ++ union nf_inet_addr saddr; ++ union nf_inet_addr daddr; ++}; ++ ++enum { ++ IP_VS_TCP_S_NONE = 0, ++ IP_VS_TCP_S_ESTABLISHED = 1, ++ IP_VS_TCP_S_SYN_SENT = 2, ++ IP_VS_TCP_S_SYN_RECV = 3, ++ IP_VS_TCP_S_FIN_WAIT = 4, ++ IP_VS_TCP_S_TIME_WAIT = 5, ++ IP_VS_TCP_S_CLOSE = 6, ++ IP_VS_TCP_S_CLOSE_WAIT = 7, ++ IP_VS_TCP_S_LAST_ACK = 8, ++ IP_VS_TCP_S_LISTEN = 9, ++ IP_VS_TCP_S_SYNACK = 10, ++ IP_VS_TCP_S_LAST = 11, ++}; ++ ++enum ip_vs_sctp_states { ++ IP_VS_SCTP_S_NONE = 0, ++ IP_VS_SCTP_S_INIT1 = 1, ++ IP_VS_SCTP_S_INIT = 2, ++ IP_VS_SCTP_S_COOKIE_SENT = 3, ++ IP_VS_SCTP_S_COOKIE_REPLIED = 4, ++ IP_VS_SCTP_S_COOKIE_WAIT = 5, ++ IP_VS_SCTP_S_COOKIE = 6, ++ IP_VS_SCTP_S_COOKIE_ECHOED = 7, ++ IP_VS_SCTP_S_ESTABLISHED = 8, ++ IP_VS_SCTP_S_SHUTDOWN_SENT = 9, ++ IP_VS_SCTP_S_SHUTDOWN_RECEIVED = 10, ++ IP_VS_SCTP_S_SHUTDOWN_ACK_SENT = 11, ++ IP_VS_SCTP_S_REJECTED = 12, ++ IP_VS_SCTP_S_CLOSED = 13, ++ IP_VS_SCTP_S_LAST = 14, ++}; ++ ++struct ip_vs_seq { ++ __u32 init_seq; ++ __u32 delta; ++ __u32 previous_delta; ++}; ++ ++struct ip_vs_counters { ++ __u64 conns; ++ __u64 inpkts; ++ __u64 outpkts; ++ __u64 inbytes; ++ __u64 outbytes; ++}; ++ ++struct ip_vs_cpu_stats { ++ struct ip_vs_counters cnt; ++ struct u64_stats_sync syncp; ++}; ++ ++struct ip_vs_conn; ++ ++struct ip_vs_app; ++ ++struct ip_vs_protocol { ++ struct ip_vs_protocol *next; ++ char *name; ++ u16 protocol; ++ u16 num_states; ++ int dont_defrag; ++ void (*init)(struct ip_vs_protocol *); ++ void (*exit)(struct ip_vs_protocol *); ++ int (*init_netns)(struct netns_ipvs *, struct ip_vs_proto_data *); ++ void (*exit_netns)(struct netns_ipvs *, struct ip_vs_proto_data *); ++ int (*conn_schedule)(struct netns_ipvs *, int, struct sk_buff *, struct ip_vs_proto_data *, int *, struct ip_vs_conn **, struct ip_vs_iphdr *); ++ struct ip_vs_conn * (*conn_in_get)(struct netns_ipvs *, int, const struct sk_buff *, const struct ip_vs_iphdr *); ++ struct ip_vs_conn * (*conn_out_get)(struct netns_ipvs *, int, const struct sk_buff *, const struct ip_vs_iphdr *); ++ int (*snat_handler)(struct sk_buff *, struct ip_vs_protocol *, struct ip_vs_conn *, struct ip_vs_iphdr *); ++ int (*dnat_handler)(struct sk_buff *, struct ip_vs_protocol *, struct ip_vs_conn *, struct ip_vs_iphdr *); ++ int (*csum_check)(int, struct sk_buff *, struct ip_vs_protocol *); ++ const char * (*state_name)(int); ++ void (*state_transition)(struct ip_vs_conn *, int, const struct sk_buff *, struct ip_vs_proto_data *); ++ int (*register_app)(struct netns_ipvs *, struct ip_vs_app *); ++ void (*unregister_app)(struct netns_ipvs *, struct ip_vs_app *); ++ int 
(*app_conn_bind)(struct ip_vs_conn *); ++ void (*debug_packet)(int, struct ip_vs_protocol *, const struct sk_buff *, int, const char *); ++ void (*timeout_change)(struct ip_vs_proto_data *, int); ++}; ++ ++struct tcp_states_t; ++ ++struct ip_vs_proto_data { ++ struct ip_vs_proto_data *next; ++ struct ip_vs_protocol *pp; ++ int *timeout_table; ++ atomic_t appcnt; ++ struct tcp_states_t *tcp_state_table; ++}; ++ ++struct ip_vs_dest; ++ ++struct ip_vs_pe; ++ ++struct ip_vs_conn { ++ struct hlist_node c_list; ++ __be16 cport; ++ __be16 dport; ++ __be16 vport; ++ u16 af; ++ union nf_inet_addr caddr; ++ union nf_inet_addr vaddr; ++ union nf_inet_addr daddr; ++ volatile __u32 flags; ++ __u16 protocol; ++ __u16 daf; ++ struct netns_ipvs *ipvs; ++ refcount_t refcnt; ++ struct timer_list timer; ++ volatile long unsigned int timeout; ++ spinlock_t lock; ++ volatile __u16 state; ++ volatile __u16 old_state; ++ __u32 fwmark; ++ long unsigned int sync_endtime; ++ struct ip_vs_conn *control; ++ atomic_t n_control; ++ struct ip_vs_dest *dest; ++ atomic_t in_pkts; ++ int (*packet_xmit)(struct sk_buff *, struct ip_vs_conn *, struct ip_vs_protocol *, struct ip_vs_iphdr *); ++ struct ip_vs_app *app; ++ void *app_data; ++ struct ip_vs_seq in_seq; ++ struct ip_vs_seq out_seq; ++ const struct ip_vs_pe *pe; ++ char *pe_data; ++ __u8 pe_data_len; ++ struct callback_head callback_head; ++}; ++ ++struct ip_vs_app { ++ struct list_head a_list; ++ int type; ++ char *name; ++ __u16 protocol; ++ struct module *module; ++ struct list_head incs_list; ++ struct list_head p_list; ++ struct ip_vs_app *app; ++ __be16 port; ++ atomic_t usecnt; ++ struct callback_head callback_head; ++ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *, struct ip_vs_iphdr *); ++ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *, struct ip_vs_iphdr *); ++ int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *); ++ int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *); ++ int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *, struct ip_vs_protocol *); ++ void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *); ++ int *timeout_table; ++ int *timeouts; ++ int timeouts_size; ++ int (*conn_schedule)(struct sk_buff *, struct ip_vs_app *, int *, struct ip_vs_conn **); ++ struct ip_vs_conn * (*conn_in_get)(const struct sk_buff *, struct ip_vs_app *, const struct iphdr *, int); ++ struct ip_vs_conn * (*conn_out_get)(const struct sk_buff *, struct ip_vs_app *, const struct iphdr *, int); ++ int (*state_transition)(struct ip_vs_conn *, int, const struct sk_buff *, struct ip_vs_app *); ++ void (*timeout_change)(struct ip_vs_app *, int); ++}; ++ ++struct ip_vs_conn_param { ++ struct netns_ipvs *ipvs; ++ const union nf_inet_addr *caddr; ++ const union nf_inet_addr *vaddr; ++ __be16 cport; ++ __be16 vport; ++ __u16 protocol; ++ u16 af; ++ const struct ip_vs_pe *pe; ++ char *pe_data; ++ __u8 pe_data_len; ++}; ++ ++struct ip_vs_service; ++ ++struct ip_vs_pe { ++ struct list_head n_list; ++ char *name; ++ atomic_t refcnt; ++ struct module *module; ++ int (*fill_param)(struct ip_vs_conn_param *, struct sk_buff *); ++ bool (*ct_match)(const struct ip_vs_conn_param *, struct ip_vs_conn *); ++ u32 (*hashkey_raw)(const struct ip_vs_conn_param *, u32, bool); ++ int (*show_pe_data)(const struct ip_vs_conn *, char *); ++ struct ip_vs_conn * (*conn_out)(struct ip_vs_service *, struct ip_vs_dest *, struct sk_buff *, const struct ip_vs_iphdr *, __be16, __be16); ++}; ++ ++struct ip_vs_dest_dst; ++ 
++struct ip_vs_dest { ++ struct list_head n_list; ++ struct hlist_node d_list; ++ u16 af; ++ __be16 port; ++ union nf_inet_addr addr; ++ volatile unsigned int flags; ++ atomic_t conn_flags; ++ atomic_t weight; ++ atomic_t last_weight; ++ refcount_t refcnt; ++ struct ip_vs_stats stats; ++ long unsigned int idle_start; ++ atomic_t activeconns; ++ atomic_t inactconns; ++ atomic_t persistconns; ++ __u32 u_threshold; ++ __u32 l_threshold; ++ spinlock_t dst_lock; ++ struct ip_vs_dest_dst *dest_dst; ++ struct ip_vs_service *svc; ++ __u16 protocol; ++ __be16 vport; ++ union nf_inet_addr vaddr; ++ __u32 vfwmark; ++ struct list_head t_list; ++ unsigned int in_rs_table: 1; ++}; ++ ++struct ip_vs_scheduler; ++ ++struct ip_vs_service { ++ struct hlist_node s_list; ++ struct hlist_node f_list; ++ atomic_t refcnt; ++ u16 af; ++ __u16 protocol; ++ union nf_inet_addr addr; ++ __be16 port; ++ __u32 fwmark; ++ unsigned int flags; ++ unsigned int timeout; ++ __be32 netmask; ++ struct netns_ipvs *ipvs; ++ struct list_head destinations; ++ __u32 num_dests; ++ struct ip_vs_stats stats; ++ struct ip_vs_scheduler *scheduler; ++ spinlock_t sched_lock; ++ void *sched_data; ++ struct ip_vs_pe *pe; ++ int conntrack_afmask; ++ struct callback_head callback_head; ++}; ++ ++struct ip_vs_scheduler { ++ struct list_head n_list; ++ char *name; ++ atomic_t refcnt; ++ struct module *module; ++ int (*init_service)(struct ip_vs_service *); ++ void (*done_service)(struct ip_vs_service *); ++ int (*add_dest)(struct ip_vs_service *, struct ip_vs_dest *); ++ int (*del_dest)(struct ip_vs_service *, struct ip_vs_dest *); ++ int (*upd_dest)(struct ip_vs_service *, struct ip_vs_dest *); ++ struct ip_vs_dest * (*schedule)(struct ip_vs_service *, const struct sk_buff *, struct ip_vs_iphdr *); ++}; ++ ++struct ip_vs_dest_dst { ++ struct dst_entry *dst_cache; ++ u32 dst_cookie; ++ union nf_inet_addr dst_saddr; ++ struct callback_head callback_head; ++}; ++ ++struct ip_vs_sync_buff; ++ ++struct ipvs_master_sync_state { ++ struct list_head sync_queue; ++ struct ip_vs_sync_buff *sync_buff; ++ long unsigned int sync_queue_len; ++ unsigned int sync_queue_delay; ++ struct delayed_work master_wakeup_work; ++ struct netns_ipvs *ipvs; ++}; ++ ++struct ip_vs_aligned_lock { ++ spinlock_t l; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ip_vs_iter_state { ++ struct seq_net_private p; ++ struct hlist_head *l; ++}; ++ ++enum net_device_flags { ++ IFF_UP = 1, ++ IFF_BROADCAST = 2, ++ IFF_DEBUG = 4, ++ IFF_LOOPBACK = 8, ++ IFF_POINTOPOINT = 16, ++ IFF_NOTRAILERS = 32, ++ IFF_RUNNING = 64, ++ IFF_NOARP = 128, ++ IFF_PROMISC = 256, ++ IFF_ALLMULTI = 512, ++ IFF_MASTER = 1024, ++ IFF_SLAVE = 2048, ++ IFF_MULTICAST = 4096, ++ IFF_PORTSEL = 8192, ++ IFF_AUTOMEDIA = 16384, ++ IFF_DYNAMIC = 32768, ++ IFF_LOWER_UP = 65536, ++ IFF_DORMANT = 131072, ++ IFF_ECHO = 262144, ++}; ++ ++struct inet_listen_hashbucket { ++ spinlock_t lock; ++ unsigned int count; ++ union { ++ struct hlist_head head; ++ struct hlist_nulls_head nulls_head; ++ }; ++}; ++ ++struct inet_ehash_bucket; ++ ++struct inet_bind_hashbucket; ++ ++struct inet_hashinfo { ++ struct inet_ehash_bucket *ehash; ++ spinlock_t *ehash_locks; ++ unsigned int ehash_mask; ++ unsigned int ehash_locks_mask; ++ struct kmem_cache *bind_bucket_cachep; ++ struct inet_bind_hashbucket *bhash; ++ unsigned int bhash_size; ++ unsigned int lhash2_mask; ++ struct inet_listen_hashbucket *lhash2; ++ long: 64; ++ struct inet_listen_hashbucket 
listening_hash[32]; ++}; ++ ++struct ip_ra_chain { ++ struct ip_ra_chain *next; ++ struct sock *sk; ++ union { ++ void (*destructor)(struct sock *); ++ struct sock *saved_sk; ++ }; ++ struct callback_head rcu; ++}; ++ ++struct crypto_cipher; ++ ++struct tcp_fastopen_context { ++ struct crypto_cipher *tfm; ++ __u8 key[16]; ++ struct callback_head rcu; ++}; ++ ++enum { ++ NFPROTO_UNSPEC = 0, ++ NFPROTO_INET = 1, ++ NFPROTO_IPV4 = 2, ++ NFPROTO_ARP = 3, ++ NFPROTO_NETDEV = 5, ++ NFPROTO_BRIDGE = 7, ++ NFPROTO_IPV6 = 10, ++ NFPROTO_DECNET = 12, ++ NFPROTO_NUMPROTO = 13, ++}; ++ ++enum ip_conntrack_status { ++ IPS_EXPECTED_BIT = 0, ++ IPS_EXPECTED = 1, ++ IPS_SEEN_REPLY_BIT = 1, ++ IPS_SEEN_REPLY = 2, ++ IPS_ASSURED_BIT = 2, ++ IPS_ASSURED = 4, ++ IPS_CONFIRMED_BIT = 3, ++ IPS_CONFIRMED = 8, ++ IPS_SRC_NAT_BIT = 4, ++ IPS_SRC_NAT = 16, ++ IPS_DST_NAT_BIT = 5, ++ IPS_DST_NAT = 32, ++ IPS_NAT_MASK = 48, ++ IPS_SEQ_ADJUST_BIT = 6, ++ IPS_SEQ_ADJUST = 64, ++ IPS_SRC_NAT_DONE_BIT = 7, ++ IPS_SRC_NAT_DONE = 128, ++ IPS_DST_NAT_DONE_BIT = 8, ++ IPS_DST_NAT_DONE = 256, ++ IPS_NAT_DONE_MASK = 384, ++ IPS_DYING_BIT = 9, ++ IPS_DYING = 512, ++ IPS_FIXED_TIMEOUT_BIT = 10, ++ IPS_FIXED_TIMEOUT = 1024, ++ IPS_TEMPLATE_BIT = 11, ++ IPS_TEMPLATE = 2048, ++ IPS_UNTRACKED_BIT = 12, ++ IPS_UNTRACKED = 4096, ++ IPS_HELPER_BIT = 13, ++ IPS_HELPER = 8192, ++ IPS_OFFLOAD_BIT = 14, ++ IPS_OFFLOAD = 16384, ++ IPS_UNCHANGEABLE_MASK = 19449, ++ __IPS_MAX_BIT = 15, ++}; ++ ++enum { ++ RTN_UNSPEC = 0, ++ RTN_UNICAST = 1, ++ RTN_LOCAL = 2, ++ RTN_BROADCAST = 3, ++ RTN_ANYCAST = 4, ++ RTN_MULTICAST = 5, ++ RTN_BLACKHOLE = 6, ++ RTN_UNREACHABLE = 7, ++ RTN_PROHIBIT = 8, ++ RTN_THROW = 9, ++ RTN_NAT = 10, ++ RTN_XRESOLVE = 11, ++ __RTN_MAX = 12, ++}; ++ ++struct sock_reuseport { ++ struct callback_head rcu; ++ u16 max_socks; ++ u16 num_socks; ++ unsigned int synq_overflow_ts; ++ unsigned int reuseport_id; ++ unsigned int bind_inany: 1; ++ unsigned int has_conns: 1; ++ struct bpf_prog *prog; ++ struct sock *socks[0]; ++}; ++ ++struct udp_hslot; ++ ++struct udp_table { ++ struct udp_hslot *hash; ++ struct udp_hslot *hash2; ++ unsigned int mask; ++ unsigned int log; ++}; ++ ++struct ip_options { ++ __be32 faddr; ++ __be32 nexthop; ++ unsigned char optlen; ++ unsigned char srr; ++ unsigned char rr; ++ unsigned char ts; ++ unsigned char is_strictroute: 1; ++ unsigned char srr_is_hit: 1; ++ unsigned char is_changed: 1; ++ unsigned char rr_needaddr: 1; ++ unsigned char ts_needtime: 1; ++ unsigned char ts_needaddr: 1; ++ unsigned char router_alert; ++ unsigned char cipso; ++ unsigned char __pad2; ++ unsigned char __data[0]; ++}; ++ ++struct ip_options_rcu { ++ struct callback_head rcu; ++ struct ip_options opt; ++}; ++ ++struct ipv6_opt_hdr; ++ ++struct ipv6_rt_hdr; ++ ++struct ipv6_txoptions { ++ refcount_t refcnt; ++ int tot_len; ++ __u16 opt_flen; ++ __u16 opt_nflen; ++ struct ipv6_opt_hdr *hopopt; ++ struct ipv6_opt_hdr *dst0opt; ++ struct ipv6_rt_hdr *srcrt; ++ struct ipv6_opt_hdr *dst1opt; ++ struct callback_head rcu; ++}; ++ ++struct inet_cork { ++ unsigned int flags; ++ __be32 addr; ++ struct ip_options *opt; ++ unsigned int fragsize; ++ int length; ++ struct dst_entry *dst; ++ u8 tx_flags; ++ __u8 ttl; ++ __s16 tos; ++ char priority; ++ __u16 gso_size; ++ u64 transmit_time; ++}; ++ ++struct inet_cork_full { ++ struct inet_cork base; ++ struct flowi fl; ++}; ++ ++struct ipv6_pinfo; ++ ++struct ip_mc_socklist; ++ ++struct inet_sock { ++ struct sock sk; ++ struct ipv6_pinfo *pinet6; ++ __be32 inet_saddr; ++ __s16 uc_ttl; ++ 
__u16 cmsg_flags; ++ __be16 inet_sport; ++ __u16 inet_id; ++ struct ip_options_rcu *inet_opt; ++ int rx_dst_ifindex; ++ __u8 tos; ++ __u8 min_ttl; ++ __u8 mc_ttl; ++ __u8 pmtudisc; ++ __u8 recverr: 1; ++ __u8 is_icsk: 1; ++ __u8 freebind: 1; ++ __u8 hdrincl: 1; ++ __u8 mc_loop: 1; ++ __u8 transparent: 1; ++ __u8 mc_all: 1; ++ __u8 nodefrag: 1; ++ __u8 bind_address_no_port: 1; ++ __u8 defer_connect: 1; ++ __u8 rcv_tos; ++ __u8 convert_csum; ++ int uc_index; ++ int mc_index; ++ __be32 mc_addr; ++ struct ip_mc_socklist *mc_list; ++ struct inet_cork_full cork; ++}; ++ ++struct in6_pktinfo { ++ struct in6_addr ipi6_addr; ++ int ipi6_ifindex; ++}; ++ ++struct inet6_cork { ++ struct ipv6_txoptions *opt; ++ u8 hop_limit; ++ u8 tclass; ++}; ++ ++struct ipv6_mc_socklist; ++ ++struct ipv6_ac_socklist; ++ ++struct ipv6_fl_socklist; ++ ++struct ipv6_pinfo { ++ struct in6_addr saddr; ++ struct in6_pktinfo sticky_pktinfo; ++ const struct in6_addr *daddr_cache; ++ __be32 flow_label; ++ __u32 frag_size; ++ __u16 __unused_1: 7; ++ __s16 hop_limit: 9; ++ __u16 mc_loop: 1; ++ __u16 __unused_2: 6; ++ __s16 mcast_hops: 9; ++ int ucast_oif; ++ int mcast_oif; ++ union { ++ struct { ++ __u16 srcrt: 1; ++ __u16 osrcrt: 1; ++ __u16 rxinfo: 1; ++ __u16 rxoinfo: 1; ++ __u16 rxhlim: 1; ++ __u16 rxohlim: 1; ++ __u16 hopopts: 1; ++ __u16 ohopopts: 1; ++ __u16 dstopts: 1; ++ __u16 odstopts: 1; ++ __u16 rxflow: 1; ++ __u16 rxtclass: 1; ++ __u16 rxpmtu: 1; ++ __u16 rxorigdstaddr: 1; ++ __u16 recvfragsize: 1; ++ } bits; ++ __u16 all; ++ } rxopt; ++ __u16 recverr: 1; ++ __u16 sndflow: 1; ++ __u16 repflow: 1; ++ __u16 pmtudisc: 3; ++ __u16 padding: 1; ++ __u16 srcprefs: 3; ++ __u16 dontfrag: 1; ++ __u16 autoflowlabel: 1; ++ __u16 autoflowlabel_set: 1; ++ __u8 min_hopcount; ++ __u8 tclass; ++ __be32 rcv_flowinfo; ++ __u32 dst_cookie; ++ __u32 rx_dst_cookie; ++ struct ipv6_mc_socklist *ipv6_mc_list; ++ struct ipv6_ac_socklist *ipv6_ac_list; ++ struct ipv6_fl_socklist *ipv6_fl_list; ++ struct ipv6_txoptions *opt; ++ struct sk_buff *pktoptions; ++ struct sk_buff *rxpmtu; ++ struct inet6_cork cork; ++}; ++ ++struct tcphdr { ++ __be16 source; ++ __be16 dest; ++ __be32 seq; ++ __be32 ack_seq; ++ __u16 res1: 4; ++ __u16 doff: 4; ++ __u16 fin: 1; ++ __u16 syn: 1; ++ __u16 rst: 1; ++ __u16 psh: 1; ++ __u16 ack: 1; ++ __u16 urg: 1; ++ __u16 ece: 1; ++ __u16 cwr: 1; ++ __be16 window; ++ __sum16 check; ++ __be16 urg_ptr; ++}; ++ ++enum sctp_msg_flags { ++ MSG_NOTIFICATION = 32768, ++}; ++ ++struct sctp_chunkhdr { ++ __u8 type; ++ __u8 flags; ++ __be16 length; ++}; ++ ++enum sctp_cid { ++ SCTP_CID_DATA = 0, ++ SCTP_CID_INIT = 1, ++ SCTP_CID_INIT_ACK = 2, ++ SCTP_CID_SACK = 3, ++ SCTP_CID_HEARTBEAT = 4, ++ SCTP_CID_HEARTBEAT_ACK = 5, ++ SCTP_CID_ABORT = 6, ++ SCTP_CID_SHUTDOWN = 7, ++ SCTP_CID_SHUTDOWN_ACK = 8, ++ SCTP_CID_ERROR = 9, ++ SCTP_CID_COOKIE_ECHO = 10, ++ SCTP_CID_COOKIE_ACK = 11, ++ SCTP_CID_ECN_ECNE = 12, ++ SCTP_CID_ECN_CWR = 13, ++ SCTP_CID_SHUTDOWN_COMPLETE = 14, ++ SCTP_CID_AUTH = 15, ++ SCTP_CID_I_DATA = 64, ++ SCTP_CID_FWD_TSN = 192, ++ SCTP_CID_ASCONF = 193, ++ SCTP_CID_I_FWD_TSN = 194, ++ SCTP_CID_ASCONF_ACK = 128, ++ SCTP_CID_RECONF = 130, ++}; ++ ++struct icmphdr { ++ __u8 type; ++ __u8 code; ++ __sum16 checksum; ++ union { ++ struct { ++ __be16 id; ++ __be16 sequence; ++ } echo; ++ __be32 gateway; ++ struct { ++ __be16 __unused; ++ __be16 mtu; ++ } frag; ++ __u8 reserved[4]; ++ } un; ++}; ++ ++struct ipv6_rt_hdr { ++ __u8 nexthdr; ++ __u8 hdrlen; ++ __u8 type; ++ __u8 segments_left; ++}; ++ ++struct ipv6_opt_hdr { ++ 
__u8 nexthdr; ++ __u8 hdrlen; ++}; ++ ++struct ipv6hdr { ++ __u8 priority: 4; ++ __u8 version: 4; ++ __u8 flow_lbl[3]; ++ __be16 payload_len; ++ __u8 nexthdr; ++ __u8 hop_limit; ++ struct in6_addr saddr; ++ struct in6_addr daddr; ++}; ++ ++struct icmpv6_echo { ++ __be16 identifier; ++ __be16 sequence; ++}; ++ ++struct icmpv6_nd_advt { ++ __u32 reserved: 5; ++ __u32 override: 1; ++ __u32 solicited: 1; ++ __u32 router: 1; ++ __u32 reserved2: 24; ++}; ++ ++struct icmpv6_nd_ra { ++ __u8 hop_limit; ++ __u8 reserved: 3; ++ __u8 router_pref: 2; ++ __u8 home_agent: 1; ++ __u8 other: 1; ++ __u8 managed: 1; ++ __be16 rt_lifetime; ++}; ++ ++struct icmp6hdr { ++ __u8 icmp6_type; ++ __u8 icmp6_code; ++ __sum16 icmp6_cksum; ++ union { ++ __be32 un_data32[1]; ++ __be16 un_data16[2]; ++ __u8 un_data8[4]; ++ struct icmpv6_echo u_echo; ++ struct icmpv6_nd_advt u_nd_advt; ++ struct icmpv6_nd_ra u_nd_ra; ++ } icmp6_dataun; ++}; ++ ++struct ip6_sf_socklist; ++ ++struct ipv6_mc_socklist { ++ struct in6_addr addr; ++ int ifindex; ++ struct ipv6_mc_socklist *next; ++ rwlock_t sflock; ++ unsigned int sfmode; ++ struct ip6_sf_socklist *sflist; ++ struct callback_head rcu; ++}; ++ ++struct ipv6_ac_socklist { ++ struct in6_addr acl_addr; ++ int acl_ifindex; ++ struct ipv6_ac_socklist *acl_next; ++}; ++ ++struct ip6_flowlabel; ++ ++struct ipv6_fl_socklist { ++ struct ipv6_fl_socklist *next; ++ struct ip6_flowlabel *fl; ++ struct callback_head rcu; ++}; ++ ++struct ip6_sf_socklist { ++ unsigned int sl_max; ++ unsigned int sl_count; ++ struct in6_addr sl_addr[0]; ++}; ++ ++struct ip6_flowlabel { ++ struct ip6_flowlabel *next; ++ __be32 label; ++ atomic_t users; ++ struct in6_addr dst; ++ struct ipv6_txoptions *opt; ++ long unsigned int linger; ++ struct callback_head rcu; ++ u8 share; ++ union { ++ struct pid *pid; ++ kuid_t uid; ++ } owner; ++ long unsigned int lastuse; ++ long unsigned int expires; ++ struct net *fl_net; ++}; ++ ++enum { ++ IP6_FH_F_FRAG = 1, ++ IP6_FH_F_AUTH = 2, ++ IP6_FH_F_SKIP_RH = 4, ++}; ++ ++struct rtable { ++ struct dst_entry dst; ++ int rt_genid; ++ unsigned int rt_flags; ++ __u16 rt_type; ++ __u8 rt_is_input; ++ __u8 rt_uses_gateway; ++ int rt_iif; ++ __be32 rt_gateway; ++ u32 rt_mtu_locked: 1; ++ u32 rt_pmtu: 31; ++ struct list_head rt_uncached; ++ struct uncached_list *rt_uncached_list; ++}; ++ ++struct inet_skb_parm { ++ int iif; ++ struct ip_options opt; ++ u16 flags; ++ u16 frag_max_size; ++}; ++ ++enum ip_defrag_users { ++ IP_DEFRAG_LOCAL_DELIVER = 0, ++ IP_DEFRAG_CALL_RA_CHAIN = 1, ++ IP_DEFRAG_CONNTRACK_IN = 2, ++ __IP_DEFRAG_CONNTRACK_IN_END = 65537, ++ IP_DEFRAG_CONNTRACK_OUT = 65538, ++ __IP_DEFRAG_CONNTRACK_OUT_END = 131073, ++ IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, ++ __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, ++ IP_DEFRAG_VS_IN = 196610, ++ IP_DEFRAG_VS_OUT = 196611, ++ IP_DEFRAG_VS_FWD = 196612, ++ IP_DEFRAG_AF_PACKET = 196613, ++ IP_DEFRAG_MACVLAN = 196614, ++}; ++ ++struct inet_ehash_bucket { ++ struct hlist_nulls_head chain; ++}; ++ ++struct inet_bind_hashbucket { ++ spinlock_t lock; ++ struct hlist_head chain; ++}; ++ ++struct ack_sample { ++ u32 pkts_acked; ++ s32 rtt_us; ++ u32 in_flight; ++}; ++ ++struct rate_sample { ++ u64 prior_mstamp; ++ u32 prior_delivered; ++ s32 delivered; ++ long int interval_us; ++ u32 snd_interval_us; ++ u32 rcv_interval_us; ++ long int rtt_us; ++ int losses; ++ u32 acked_sacked; ++ u32 prior_in_flight; ++ bool is_app_limited; ++ bool is_retrans; ++ bool is_ack_delayed; ++}; ++ ++struct udp_hslot { ++ struct hlist_head head; ++ int count; ++ 
spinlock_t lock; ++}; ++ ++struct icmp_err { ++ int errno; ++ unsigned int fatal: 1; ++}; ++ ++struct nf_hook_ops { ++ nf_hookfn *hook; ++ struct net_device *dev; ++ void *priv; ++ u_int8_t pf; ++ unsigned int hooknum; ++ int priority; ++}; ++ ++enum nf_ip_hook_priorities { ++ NF_IP_PRI_FIRST = 2147483648, ++ NF_IP_PRI_RAW_BEFORE_DEFRAG = 4294966846, ++ NF_IP_PRI_CONNTRACK_DEFRAG = 4294966896, ++ NF_IP_PRI_RAW = 4294966996, ++ NF_IP_PRI_SELINUX_FIRST = 4294967071, ++ NF_IP_PRI_CONNTRACK = 4294967096, ++ NF_IP_PRI_MANGLE = 4294967146, ++ NF_IP_PRI_NAT_DST = 4294967196, ++ NF_IP_PRI_FILTER = 0, ++ NF_IP_PRI_SECURITY = 50, ++ NF_IP_PRI_NAT_SRC = 100, ++ NF_IP_PRI_SELINUX_LAST = 225, ++ NF_IP_PRI_CONNTRACK_HELPER = 300, ++ NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, ++ NF_IP_PRI_LAST = 2147483647, ++}; ++ ++enum nf_ip6_hook_priorities { ++ NF_IP6_PRI_FIRST = 2147483648, ++ NF_IP6_PRI_RAW_BEFORE_DEFRAG = 4294966846, ++ NF_IP6_PRI_CONNTRACK_DEFRAG = 4294966896, ++ NF_IP6_PRI_RAW = 4294966996, ++ NF_IP6_PRI_SELINUX_FIRST = 4294967071, ++ NF_IP6_PRI_CONNTRACK = 4294967096, ++ NF_IP6_PRI_MANGLE = 4294967146, ++ NF_IP6_PRI_NAT_DST = 4294967196, ++ NF_IP6_PRI_FILTER = 0, ++ NF_IP6_PRI_SECURITY = 50, ++ NF_IP6_PRI_NAT_SRC = 100, ++ NF_IP6_PRI_SELINUX_LAST = 225, ++ NF_IP6_PRI_CONNTRACK_HELPER = 300, ++ NF_IP6_PRI_LAST = 2147483647, ++}; ++ ++enum { ++ IP_VS_DIR_INPUT = 0, ++ IP_VS_DIR_OUTPUT = 1, ++ IP_VS_DIR_INPUT_ONLY = 2, ++ IP_VS_DIR_LAST = 3, ++}; ++ ++struct sysinfo { ++ __kernel_long_t uptime; ++ __kernel_ulong_t loads[3]; ++ __kernel_ulong_t totalram; ++ __kernel_ulong_t freeram; ++ __kernel_ulong_t sharedram; ++ __kernel_ulong_t bufferram; ++ __kernel_ulong_t totalswap; ++ __kernel_ulong_t freeswap; ++ __u16 procs; ++ __u16 pad; ++ __kernel_ulong_t totalhigh; ++ __kernel_ulong_t freehigh; ++ __u32 mem_unit; ++ char _f[0]; ++}; ++ ++enum { ++ WORK_STRUCT_PENDING_BIT = 0, ++ WORK_STRUCT_DELAYED_BIT = 1, ++ WORK_STRUCT_PWQ_BIT = 2, ++ WORK_STRUCT_LINKED_BIT = 3, ++ WORK_STRUCT_COLOR_SHIFT = 4, ++ WORK_STRUCT_COLOR_BITS = 4, ++ WORK_STRUCT_PENDING = 1, ++ WORK_STRUCT_DELAYED = 2, ++ WORK_STRUCT_PWQ = 4, ++ WORK_STRUCT_LINKED = 8, ++ WORK_STRUCT_STATIC = 0, ++ WORK_NR_COLORS = 15, ++ WORK_NO_COLOR = 15, ++ WORK_CPU_UNBOUND = 1024, ++ WORK_STRUCT_FLAG_BITS = 8, ++ WORK_OFFQ_FLAG_BASE = 4, ++ __WORK_OFFQ_CANCELING = 4, ++ WORK_OFFQ_CANCELING = 16, ++ WORK_OFFQ_FLAG_BITS = 1, ++ WORK_OFFQ_POOL_SHIFT = 5, ++ WORK_OFFQ_LEFT = 59, ++ WORK_OFFQ_POOL_BITS = 31, ++ WORK_OFFQ_POOL_NONE = 2147483647, ++ WORK_STRUCT_FLAG_MASK = 255, ++ WORK_STRUCT_WQ_DATA_MASK = 4294967040, ++ WORK_STRUCT_NO_POOL = 4294967264, ++ WORK_BUSY_PENDING = 1, ++ WORK_BUSY_RUNNING = 2, ++ WORK_FLUSH_FROM_CANCEL = 1, ++ WORK_FLUSH_AT_NICE = 2, ++ WORKER_DESC_LEN = 24, ++}; ++ ++struct reclaim_state { ++ long unsigned int reclaimed_slab; ++}; ++ ++struct swap_cluster_info { ++ spinlock_t lock; ++ unsigned int data: 24; ++ unsigned int flags: 8; ++}; ++ ++struct swap_cluster_list { ++ struct swap_cluster_info head; ++ struct swap_cluster_info tail; ++}; ++ ++struct swap_extent { ++ struct list_head list; ++ long unsigned int start_page; ++ long unsigned int nr_pages; ++ sector_t start_block; ++}; ++ ++struct percpu_cluster; ++ ++struct swap_info_struct { ++ long unsigned int flags; ++ short int prio; ++ struct plist_node list; ++ signed char type; ++ unsigned int max; ++ unsigned char *swap_map; ++ struct swap_cluster_info *cluster_info; ++ struct swap_cluster_list free_clusters; ++ unsigned int lowest_bit; ++ unsigned int highest_bit; ++ 
unsigned int pages; ++ unsigned int inuse_pages; ++ unsigned int cluster_next; ++ unsigned int cluster_nr; ++ struct percpu_cluster *percpu_cluster; ++ struct swap_extent *curr_swap_extent; ++ struct swap_extent first_swap_extent; ++ struct block_device *bdev; ++ struct file *swap_file; ++ unsigned int old_block_size; ++ long unsigned int *frontswap_map; ++ atomic_t frontswap_pages; ++ spinlock_t lock; ++ spinlock_t cont_lock; ++ struct work_struct discard_work; ++ struct swap_cluster_list discard_clusters; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ struct plist_node avail_lists[0]; ++}; ++ ++struct node { ++ struct device dev; ++ struct work_struct node_work; ++}; ++ ++struct percpu_cluster { ++ struct swap_cluster_info index; ++ unsigned int next; ++}; ++ ++struct scm_creds { ++ u32 pid; ++ kuid_t uid; ++ kgid_t gid; ++}; ++ ++struct netlink_skb_parms { ++ struct scm_creds creds; ++ __u32 portid; ++ __u32 dst_group; ++ __u32 flags; ++ struct sock *sk; ++ bool nsid_is_set; ++ int nsid; ++}; ++ ++enum netdev_cmd { ++ NETDEV_UP = 1, ++ NETDEV_DOWN = 2, ++ NETDEV_REBOOT = 3, ++ NETDEV_CHANGE = 4, ++ NETDEV_REGISTER = 5, ++ NETDEV_UNREGISTER = 6, ++ NETDEV_CHANGEMTU = 7, ++ NETDEV_CHANGEADDR = 8, ++ NETDEV_GOING_DOWN = 9, ++ NETDEV_CHANGENAME = 10, ++ NETDEV_FEAT_CHANGE = 11, ++ NETDEV_BONDING_FAILOVER = 12, ++ NETDEV_PRE_UP = 13, ++ NETDEV_PRE_TYPE_CHANGE = 14, ++ NETDEV_POST_TYPE_CHANGE = 15, ++ NETDEV_POST_INIT = 16, ++ NETDEV_RELEASE = 17, ++ NETDEV_NOTIFY_PEERS = 18, ++ NETDEV_JOIN = 19, ++ NETDEV_CHANGEUPPER = 20, ++ NETDEV_RESEND_IGMP = 21, ++ NETDEV_PRECHANGEMTU = 22, ++ NETDEV_CHANGEINFODATA = 23, ++ NETDEV_BONDING_INFO = 24, ++ NETDEV_PRECHANGEUPPER = 25, ++ NETDEV_CHANGELOWERSTATE = 26, ++ NETDEV_UDP_TUNNEL_PUSH_INFO = 27, ++ NETDEV_UDP_TUNNEL_DROP_INFO = 28, ++ NETDEV_CHANGE_TX_QUEUE_LEN = 29, ++ NETDEV_CVLAN_FILTER_PUSH_INFO = 30, ++ NETDEV_CVLAN_FILTER_DROP_INFO = 31, ++ NETDEV_SVLAN_FILTER_PUSH_INFO = 32, ++ NETDEV_SVLAN_FILTER_DROP_INFO = 33, ++}; ++ ++struct netdev_notifier_info { ++ struct net_device *dev; ++ struct netlink_ext_ack *extack; ++}; ++ ++struct nf_sockopt_ops { ++ struct list_head list; ++ u_int8_t pf; ++ int set_optmin; ++ int set_optmax; ++ int (*set)(struct sock *, int, void *, unsigned int); ++ int (*compat_set)(struct sock *, int, void *, unsigned int); ++ int get_optmin; ++ int get_optmax; ++ int (*get)(struct sock *, int, void *, int *); ++ int (*compat_get)(struct sock *, int, void *, int *); ++ struct module *owner; ++}; ++ ++enum { ++ NLA_UNSPEC = 0, ++ NLA_U8 = 1, ++ NLA_U16 = 2, ++ NLA_U32 = 3, ++ NLA_U64 = 4, ++ NLA_STRING = 5, ++ NLA_FLAG = 6, ++ NLA_MSECS = 7, ++ NLA_NESTED = 8, ++ NLA_NESTED_COMPAT = 9, ++ NLA_NUL_STRING = 10, ++ NLA_BINARY = 11, ++ NLA_S8 = 12, ++ NLA_S16 = 13, ++ NLA_S32 = 14, ++ NLA_S64 = 15, ++ NLA_BITFIELD32 = 16, ++ __NLA_TYPE_MAX = 17, ++}; ++ ++struct genlmsghdr { ++ __u8 cmd; ++ __u8 version; ++ __u16 reserved; ++}; ++ ++struct genl_multicast_group { ++ char name[16]; ++}; ++ ++struct genl_ops; ++ ++struct genl_info; ++ ++struct genl_family { ++ int id; ++ unsigned int hdrsize; ++ char name[16]; ++ unsigned int version; ++ unsigned int maxattr; ++ bool netnsok; ++ bool parallel_ops; ++ int (*pre_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); ++ void (*post_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); ++ int (*mcast_bind)(struct net *, int); ++ void (*mcast_unbind)(struct net *, int); ++ struct nlattr **attrbuf; ++ const struct genl_ops 
*ops; ++ const struct genl_multicast_group *mcgrps; ++ unsigned int n_ops; ++ unsigned int n_mcgrps; ++ unsigned int mcgrp_offset; ++ struct module *module; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct genl_ops { ++ const struct nla_policy *policy; ++ int (*doit)(struct sk_buff *, struct genl_info *); ++ int (*start)(struct netlink_callback *); ++ int (*dumpit)(struct sk_buff *, struct netlink_callback *); ++ int (*done)(struct netlink_callback *); ++ u8 cmd; ++ u8 internal_flags; ++ u8 flags; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct genl_info { ++ u32 snd_seq; ++ u32 snd_portid; ++ struct nlmsghdr *nlhdr; ++ struct genlmsghdr *genlhdr; ++ void *userhdr; ++ struct nlattr **attrs; ++ possible_net_t _net; ++ void *user_ptr[2]; ++ struct netlink_ext_ack *extack; ++}; ++ ++struct ip_vs_service_user { ++ __u16 protocol; ++ __be32 addr; ++ __be16 port; ++ __u32 fwmark; ++ char sched_name[16]; ++ unsigned int flags; ++ unsigned int timeout; ++ __be32 netmask; ++}; ++ ++struct ip_vs_dest_user { ++ __be32 addr; ++ __be16 port; ++ unsigned int conn_flags; ++ int weight; ++ __u32 u_threshold; ++ __u32 l_threshold; ++}; ++ ++struct ip_vs_stats_user { ++ __u32 conns; ++ __u32 inpkts; ++ __u32 outpkts; ++ __u64 inbytes; ++ __u64 outbytes; ++ __u32 cps; ++ __u32 inpps; ++ __u32 outpps; ++ __u32 inbps; ++ __u32 outbps; ++}; ++ ++struct ip_vs_getinfo { ++ unsigned int version; ++ unsigned int size; ++ unsigned int num_services; ++}; ++ ++struct ip_vs_service_entry { ++ __u16 protocol; ++ __be32 addr; ++ __be16 port; ++ __u32 fwmark; ++ char sched_name[16]; ++ unsigned int flags; ++ unsigned int timeout; ++ __be32 netmask; ++ unsigned int num_dests; ++ struct ip_vs_stats_user stats; ++}; ++ ++struct ip_vs_dest_entry { ++ __be32 addr; ++ __be16 port; ++ unsigned int conn_flags; ++ int weight; ++ __u32 u_threshold; ++ __u32 l_threshold; ++ __u32 activeconns; ++ __u32 inactconns; ++ __u32 persistconns; ++ struct ip_vs_stats_user stats; ++}; ++ ++struct ip_vs_get_dests { ++ __u16 protocol; ++ __be32 addr; ++ __be16 port; ++ __u32 fwmark; ++ unsigned int num_dests; ++ struct ip_vs_dest_entry entrytable[0]; ++}; ++ ++struct ip_vs_get_services { ++ unsigned int num_services; ++ struct ip_vs_service_entry entrytable[0]; ++}; ++ ++struct ip_vs_timeout_user { ++ int tcp_timeout; ++ int tcp_fin_timeout; ++ int udp_timeout; ++}; ++ ++struct ip_vs_daemon_user { ++ int state; ++ char mcast_ifn[16]; ++ int syncid; ++}; ++ ++struct ip_vs_flags { ++ __u32 flags; ++ __u32 mask; ++}; ++ ++enum { ++ IPVS_CMD_UNSPEC = 0, ++ IPVS_CMD_NEW_SERVICE = 1, ++ IPVS_CMD_SET_SERVICE = 2, ++ IPVS_CMD_DEL_SERVICE = 3, ++ IPVS_CMD_GET_SERVICE = 4, ++ IPVS_CMD_NEW_DEST = 5, ++ IPVS_CMD_SET_DEST = 6, ++ IPVS_CMD_DEL_DEST = 7, ++ IPVS_CMD_GET_DEST = 8, ++ IPVS_CMD_NEW_DAEMON = 9, ++ IPVS_CMD_DEL_DAEMON = 10, ++ IPVS_CMD_GET_DAEMON = 11, ++ IPVS_CMD_SET_CONFIG = 12, ++ IPVS_CMD_GET_CONFIG = 13, ++ IPVS_CMD_SET_INFO = 14, ++ IPVS_CMD_GET_INFO = 15, ++ IPVS_CMD_ZERO = 16, ++ IPVS_CMD_FLUSH = 17, ++ __IPVS_CMD_MAX = 18, ++}; ++ ++enum { ++ 
IPVS_CMD_ATTR_UNSPEC = 0, ++ IPVS_CMD_ATTR_SERVICE = 1, ++ IPVS_CMD_ATTR_DEST = 2, ++ IPVS_CMD_ATTR_DAEMON = 3, ++ IPVS_CMD_ATTR_TIMEOUT_TCP = 4, ++ IPVS_CMD_ATTR_TIMEOUT_TCP_FIN = 5, ++ IPVS_CMD_ATTR_TIMEOUT_UDP = 6, ++ __IPVS_CMD_ATTR_MAX = 7, ++}; ++ ++enum { ++ IPVS_SVC_ATTR_UNSPEC = 0, ++ IPVS_SVC_ATTR_AF = 1, ++ IPVS_SVC_ATTR_PROTOCOL = 2, ++ IPVS_SVC_ATTR_ADDR = 3, ++ IPVS_SVC_ATTR_PORT = 4, ++ IPVS_SVC_ATTR_FWMARK = 5, ++ IPVS_SVC_ATTR_SCHED_NAME = 6, ++ IPVS_SVC_ATTR_FLAGS = 7, ++ IPVS_SVC_ATTR_TIMEOUT = 8, ++ IPVS_SVC_ATTR_NETMASK = 9, ++ IPVS_SVC_ATTR_STATS = 10, ++ IPVS_SVC_ATTR_PE_NAME = 11, ++ IPVS_SVC_ATTR_STATS64 = 12, ++ __IPVS_SVC_ATTR_MAX = 13, ++}; ++ ++enum { ++ IPVS_DEST_ATTR_UNSPEC = 0, ++ IPVS_DEST_ATTR_ADDR = 1, ++ IPVS_DEST_ATTR_PORT = 2, ++ IPVS_DEST_ATTR_FWD_METHOD = 3, ++ IPVS_DEST_ATTR_WEIGHT = 4, ++ IPVS_DEST_ATTR_U_THRESH = 5, ++ IPVS_DEST_ATTR_L_THRESH = 6, ++ IPVS_DEST_ATTR_ACTIVE_CONNS = 7, ++ IPVS_DEST_ATTR_INACT_CONNS = 8, ++ IPVS_DEST_ATTR_PERSIST_CONNS = 9, ++ IPVS_DEST_ATTR_STATS = 10, ++ IPVS_DEST_ATTR_ADDR_FAMILY = 11, ++ IPVS_DEST_ATTR_STATS64 = 12, ++ __IPVS_DEST_ATTR_MAX = 13, ++}; ++ ++enum { ++ IPVS_DAEMON_ATTR_UNSPEC = 0, ++ IPVS_DAEMON_ATTR_STATE = 1, ++ IPVS_DAEMON_ATTR_MCAST_IFN = 2, ++ IPVS_DAEMON_ATTR_SYNC_ID = 3, ++ IPVS_DAEMON_ATTR_SYNC_MAXLEN = 4, ++ IPVS_DAEMON_ATTR_MCAST_GROUP = 5, ++ IPVS_DAEMON_ATTR_MCAST_GROUP6 = 6, ++ IPVS_DAEMON_ATTR_MCAST_PORT = 7, ++ IPVS_DAEMON_ATTR_MCAST_TTL = 8, ++ __IPVS_DAEMON_ATTR_MAX = 9, ++}; ++ ++enum { ++ IPVS_STATS_ATTR_UNSPEC = 0, ++ IPVS_STATS_ATTR_CONNS = 1, ++ IPVS_STATS_ATTR_INPKTS = 2, ++ IPVS_STATS_ATTR_OUTPKTS = 3, ++ IPVS_STATS_ATTR_INBYTES = 4, ++ IPVS_STATS_ATTR_OUTBYTES = 5, ++ IPVS_STATS_ATTR_CPS = 6, ++ IPVS_STATS_ATTR_INPPS = 7, ++ IPVS_STATS_ATTR_OUTPPS = 8, ++ IPVS_STATS_ATTR_INBPS = 9, ++ IPVS_STATS_ATTR_OUTBPS = 10, ++ IPVS_STATS_ATTR_PAD = 11, ++ __IPVS_STATS_ATTR_MAX = 12, ++}; ++ ++enum { ++ IPVS_INFO_ATTR_UNSPEC = 0, ++ IPVS_INFO_ATTR_VERSION = 1, ++ IPVS_INFO_ATTR_CONN_TAB_SIZE = 2, ++ __IPVS_INFO_ATTR_MAX = 3, ++}; ++ ++enum { ++ IP_VS_UDP_S_NORMAL = 0, ++ IP_VS_UDP_S_LAST = 1, ++}; ++ ++struct ip_vs_service_user_kern { ++ u16 af; ++ u16 protocol; ++ union nf_inet_addr addr; ++ __be16 port; ++ u32 fwmark; ++ char *sched_name; ++ char *pe_name; ++ unsigned int flags; ++ unsigned int timeout; ++ __be32 netmask; ++}; ++ ++struct ip_vs_dest_user_kern { ++ union nf_inet_addr addr; ++ __be16 port; ++ unsigned int conn_flags; ++ int weight; ++ u32 u_threshold; ++ u32 l_threshold; ++ u16 af; ++}; ++ ++struct ip_vs_iter { ++ struct seq_net_private p; ++ struct hlist_head *table; ++ int bucket; ++}; ++ ++typedef u8 uint8_t; ++ ++struct skb_frag_struct { ++ struct { ++ struct page *p; ++ } page; ++ __u32 page_offset; ++ __u32 size; ++}; ++ ++typedef struct skb_frag_struct skb_frag_t; ++ ++struct skb_shared_hwtstamps { ++ ktime_t hwtstamp; ++}; ++ ++struct skb_shared_info { ++ __u8 __unused; ++ __u8 meta_len; ++ __u8 nr_frags; ++ __u8 tx_flags; ++ short unsigned int gso_size; ++ short unsigned int gso_segs; ++ struct sk_buff *frag_list; ++ struct skb_shared_hwtstamps hwtstamps; ++ unsigned int gso_type; ++ u32 tskey; ++ atomic_t dataref; ++ void *destructor_arg; ++ skb_frag_t frags[16]; ++}; ++ ++enum { ++ SKB_GSO_TCPV4 = 1, ++ SKB_GSO_DODGY = 2, ++ SKB_GSO_TCP_ECN = 4, ++ SKB_GSO_TCP_FIXEDID = 8, ++ SKB_GSO_TCPV6 = 16, ++ SKB_GSO_FCOE = 32, ++ SKB_GSO_GRE = 64, ++ SKB_GSO_GRE_CSUM = 128, ++ SKB_GSO_IPXIP4 = 256, ++ SKB_GSO_IPXIP6 = 512, ++ SKB_GSO_UDP_TUNNEL = 1024, ++ 
SKB_GSO_UDP_TUNNEL_CSUM = 2048, ++ SKB_GSO_PARTIAL = 4096, ++ SKB_GSO_TUNNEL_REMCSUM = 8192, ++ SKB_GSO_SCTP = 16384, ++ SKB_GSO_ESP = 32768, ++ SKB_GSO_UDP = 65536, ++ SKB_GSO_UDP_L4 = 131072, ++}; ++ ++enum { ++ IPSTATS_MIB_NUM = 0, ++ IPSTATS_MIB_INPKTS = 1, ++ IPSTATS_MIB_INOCTETS = 2, ++ IPSTATS_MIB_INDELIVERS = 3, ++ IPSTATS_MIB_OUTFORWDATAGRAMS = 4, ++ IPSTATS_MIB_OUTPKTS = 5, ++ IPSTATS_MIB_OUTOCTETS = 6, ++ IPSTATS_MIB_INHDRERRORS = 7, ++ IPSTATS_MIB_INTOOBIGERRORS = 8, ++ IPSTATS_MIB_INNOROUTES = 9, ++ IPSTATS_MIB_INADDRERRORS = 10, ++ IPSTATS_MIB_INUNKNOWNPROTOS = 11, ++ IPSTATS_MIB_INTRUNCATEDPKTS = 12, ++ IPSTATS_MIB_INDISCARDS = 13, ++ IPSTATS_MIB_OUTDISCARDS = 14, ++ IPSTATS_MIB_OUTNOROUTES = 15, ++ IPSTATS_MIB_REASMTIMEOUT = 16, ++ IPSTATS_MIB_REASMREQDS = 17, ++ IPSTATS_MIB_REASMOKS = 18, ++ IPSTATS_MIB_REASMFAILS = 19, ++ IPSTATS_MIB_FRAGOKS = 20, ++ IPSTATS_MIB_FRAGFAILS = 21, ++ IPSTATS_MIB_FRAGCREATES = 22, ++ IPSTATS_MIB_INMCASTPKTS = 23, ++ IPSTATS_MIB_OUTMCASTPKTS = 24, ++ IPSTATS_MIB_INBCASTPKTS = 25, ++ IPSTATS_MIB_OUTBCASTPKTS = 26, ++ IPSTATS_MIB_INMCASTOCTETS = 27, ++ IPSTATS_MIB_OUTMCASTOCTETS = 28, ++ IPSTATS_MIB_INBCASTOCTETS = 29, ++ IPSTATS_MIB_OUTBCASTOCTETS = 30, ++ IPSTATS_MIB_CSUMERRORS = 31, ++ IPSTATS_MIB_NOECTPKTS = 32, ++ IPSTATS_MIB_ECT1PKTS = 33, ++ IPSTATS_MIB_ECT0PKTS = 34, ++ IPSTATS_MIB_CEPKTS = 35, ++ IPSTATS_MIB_REASM_OVERLAPS = 36, ++ __IPSTATS_MIB_MAX = 37, ++}; ++ ++enum { ++ TCPF_ESTABLISHED = 2, ++ TCPF_SYN_SENT = 4, ++ TCPF_SYN_RECV = 8, ++ TCPF_FIN_WAIT1 = 16, ++ TCPF_FIN_WAIT2 = 32, ++ TCPF_TIME_WAIT = 64, ++ TCPF_CLOSE = 128, ++ TCPF_CLOSE_WAIT = 256, ++ TCPF_LAST_ACK = 512, ++ TCPF_LISTEN = 1024, ++ TCPF_CLOSING = 2048, ++ TCPF_NEW_SYN_RECV = 4096, ++}; ++ ++struct inet6_skb_parm { ++ int iif; ++ __be16 ra; ++ __u16 dst0; ++ __u16 srcrt; ++ __u16 dst1; ++ __u16 lastopt; ++ __u16 nhoff; ++ __u16 flags; ++ __u16 dsthao; ++ __u16 frag_max_size; ++}; ++ ++enum { ++ INET_ECN_NOT_ECT = 0, ++ INET_ECN_ECT_1 = 1, ++ INET_ECN_ECT_0 = 2, ++ INET_ECN_CE = 3, ++ INET_ECN_MASK = 3, ++}; ++ ++enum { ++ IP_VS_RT_MODE_LOCAL = 1, ++ IP_VS_RT_MODE_NON_LOCAL = 2, ++ IP_VS_RT_MODE_RDR = 4, ++ IP_VS_RT_MODE_CONNECT = 8, ++ IP_VS_RT_MODE_KNOWN_NH = 16, ++ IP_VS_RT_MODE_TUNNEL = 32, ++}; ++ ++struct offload_callbacks { ++ struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); ++ struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); ++ int (*gro_complete)(struct sk_buff *, int); ++}; ++ ++struct net_protocol { ++ int (*early_demux)(struct sk_buff *); ++ int (*early_demux_handler)(struct sk_buff *); ++ int (*handler)(struct sk_buff *); ++ void (*err_handler)(struct sk_buff *, u32); ++ unsigned int no_policy: 1; ++ unsigned int netns_ok: 1; ++ unsigned int icmp_strict_tag_validation: 1; ++}; ++ ++struct inet6_protocol { ++ void (*early_demux)(struct sk_buff *); ++ void (*early_demux_handler)(struct sk_buff *); ++ int (*handler)(struct sk_buff *); ++ void (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); ++ unsigned int flags; ++}; ++ ++struct net_offload { ++ struct offload_callbacks callbacks; ++ unsigned int flags; ++}; ++ ++typedef unsigned char u_char; ++ ++struct wait_queue_entry; ++ ++typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); ++ ++struct wait_queue_entry { ++ unsigned int flags; ++ void *private; ++ wait_queue_func_t func; ++ struct list_head entry; ++}; ++ ++enum { ++ ITER_IOVEC = 0, ++ ITER_KVEC = 2, ++ ITER_BVEC = 4, ++ ITER_PIPE = 8, ++}; 
++ ++enum sock_type { ++ SOCK_STREAM = 1, ++ SOCK_DGRAM = 2, ++ SOCK_RAW = 3, ++ SOCK_RDM = 4, ++ SOCK_SEQPACKET = 5, ++ SOCK_DCCP = 6, ++ SOCK_PACKET = 10, ++}; ++ ++struct sockaddr_in6 { ++ short unsigned int sin6_family; ++ __be16 sin6_port; ++ __be32 sin6_flowinfo; ++ struct in6_addr sin6_addr; ++ __u32 sin6_scope_id; ++}; ++ ++struct ipv4_devconf { ++ void *sysctl; ++ int data[32]; ++ long unsigned int state[1]; ++}; ++ ++struct ip_mreqn { ++ struct in_addr imr_multiaddr; ++ struct in_addr imr_address; ++ int imr_ifindex; ++}; ++ ++struct sockaddr_in { ++ __kernel_sa_family_t sin_family; ++ __be16 sin_port; ++ struct in_addr sin_addr; ++ unsigned char __pad[8]; ++}; ++ ++struct in_ifaddr; ++ ++struct ip_mc_list; ++ ++struct in_device { ++ struct net_device *dev; ++ refcount_t refcnt; ++ int dead; ++ struct in_ifaddr *ifa_list; ++ struct ip_mc_list *mc_list; ++ struct ip_mc_list **mc_hash; ++ int mc_count; ++ spinlock_t mc_tomb_lock; ++ struct ip_mc_list *mc_tomb; ++ long unsigned int mr_v1_seen; ++ long unsigned int mr_v2_seen; ++ long unsigned int mr_maxdelay; ++ long unsigned int mr_qi; ++ long unsigned int mr_qri; ++ unsigned char mr_qrv; ++ unsigned char mr_gq_running; ++ unsigned char mr_ifc_count; ++ struct timer_list mr_gq_timer; ++ struct timer_list mr_ifc_timer; ++ struct neigh_parms *arp_parms; ++ struct ipv4_devconf cnf; ++ struct callback_head callback_head; ++}; ++ ++enum rt_scope_t { ++ RT_SCOPE_UNIVERSE = 0, ++ RT_SCOPE_SITE = 200, ++ RT_SCOPE_LINK = 253, ++ RT_SCOPE_HOST = 254, ++ RT_SCOPE_NOWHERE = 255, ++}; ++ ++struct in_ifaddr { ++ struct hlist_node hash; ++ struct in_ifaddr *ifa_next; ++ struct in_device *ifa_dev; ++ struct callback_head callback_head; ++ __be32 ifa_local; ++ __be32 ifa_address; ++ __be32 ifa_mask; ++ __u32 ifa_rt_priority; ++ __be32 ifa_broadcast; ++ unsigned char ifa_scope; ++ unsigned char ifa_prefixlen; ++ __u32 ifa_flags; ++ char ifa_label[16]; ++ __u32 ifa_valid_lft; ++ __u32 ifa_preferred_lft; ++ long unsigned int ifa_cstamp; ++ long unsigned int ifa_tstamp; ++}; ++ ++struct ip_sf_list; ++ ++struct ip_mc_list { ++ struct in_device *interface; ++ __be32 multiaddr; ++ unsigned int sfmode; ++ struct ip_sf_list *sources; ++ struct ip_sf_list *tomb; ++ long unsigned int sfcount[2]; ++ union { ++ struct ip_mc_list *next; ++ struct ip_mc_list *next_rcu; ++ }; ++ struct ip_mc_list *next_hash; ++ struct timer_list timer; ++ int users; ++ refcount_t refcnt; ++ spinlock_t lock; ++ char tm_running; ++ char reporter; ++ char unsolicit_count; ++ char loaded; ++ unsigned char gsquery; ++ unsigned char crcount; ++ struct callback_head rcu; ++}; ++ ++struct ip_sf_socklist { ++ unsigned int sl_max; ++ unsigned int sl_count; ++ struct callback_head rcu; ++ __be32 sl_addr[0]; ++}; ++ ++struct ip_mc_socklist { ++ struct ip_mc_socklist *next_rcu; ++ struct ip_mreqn multi; ++ unsigned int sfmode; ++ struct ip_sf_socklist *sflist; ++ struct callback_head rcu; ++}; ++ ++struct ip_sf_list { ++ struct ip_sf_list *sf_next; ++ __be32 sf_inaddr; ++ long unsigned int sf_count[2]; ++ unsigned char sf_gsresp; ++ unsigned char sf_oldin; ++ unsigned char sf_crcount; ++}; ++ ++struct udp_sock { ++ struct inet_sock inet; ++ int pending; ++ unsigned int corkflag; ++ __u8 encap_type; ++ unsigned char no_check6_tx: 1; ++ unsigned char no_check6_rx: 1; ++ __u16 len; ++ __u16 gso_size; ++ __u16 pcslen; ++ __u16 pcrlen; ++ __u8 pcflag; ++ __u8 unused[3]; ++ int (*encap_rcv)(struct sock *, struct sk_buff *); ++ void (*encap_destroy)(struct sock *); ++ struct sk_buff * 
(*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); ++ int (*gro_complete)(struct sock *, struct sk_buff *, int); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sk_buff_head reader_queue; ++ int forward_deficit; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ip_vs_sync_mesg; ++ ++struct ip_vs_sync_buff { ++ struct list_head list; ++ long unsigned int firstuse; ++ struct ip_vs_sync_mesg *mesg; ++ unsigned char *head; ++ unsigned char *end; ++}; ++ ++struct ip_vs_sync_thread_data { ++ struct task_struct *task; ++ struct netns_ipvs *ipvs; ++ struct socket *sock; ++ char *buf; ++ int id; ++}; ++ ++struct ip_vs_sync_conn_v0 { ++ __u8 reserved; ++ __u8 protocol; ++ __be16 cport; ++ __be16 vport; ++ __be16 dport; ++ __be32 caddr; ++ __be32 vaddr; ++ __be32 daddr; ++ __be16 flags; ++ __be16 state; ++}; ++ ++struct ip_vs_sync_conn_options { ++ struct ip_vs_seq in_seq; ++ struct ip_vs_seq out_seq; ++}; ++ ++struct ip_vs_sync_v4 { ++ __u8 type; ++ __u8 protocol; ++ __be16 ver_size; ++ __be32 flags; ++ __be16 state; ++ __be16 cport; ++ __be16 vport; ++ __be16 dport; ++ __be32 fwmark; ++ __be32 timeout; ++ __be32 caddr; ++ __be32 vaddr; ++ __be32 daddr; ++}; ++ ++struct ip_vs_sync_v6 { ++ __u8 type; ++ __u8 protocol; ++ __be16 ver_size; ++ __be32 flags; ++ __be16 state; ++ __be16 cport; ++ __be16 vport; ++ __be16 dport; ++ __be32 fwmark; ++ __be32 timeout; ++ struct in6_addr caddr; ++ struct in6_addr vaddr; ++ struct in6_addr daddr; ++}; ++ ++union ip_vs_sync_conn { ++ struct ip_vs_sync_v4 v4; ++ struct ip_vs_sync_v6 v6; ++}; ++ ++struct ip_vs_sync_mesg_v0 { ++ __u8 nr_conns; ++ __u8 syncid; ++ __be16 size; ++}; ++ ++struct ip_vs_sync_mesg { ++ __u8 reserved; ++ __u8 syncid; ++ __be16 size; ++ __u8 nr_conns; ++ __s8 version; ++ __u16 spare; ++}; ++ ++union ipvs_sockaddr { ++ struct sockaddr_in in; ++ struct sockaddr_in6 in6; ++}; ++ ++struct _ddebug { ++ const char *modname; ++ const char *function; ++ const char *filename; ++ const char *format; ++ unsigned int lineno: 18; ++ unsigned int flags: 8; ++ union { ++ struct static_key_true dd_key_true; ++ struct static_key_false dd_key_false; ++ } key; ++}; ++ ++struct tcp_states_t { ++ int next_state[11]; ++}; ++ ++struct udphdr { ++ __be16 source; ++ __be16 dest; ++ __be16 len; ++ __sum16 check; ++}; ++ ++struct llist_head { ++ struct llist_node *first; ++}; ++ ++struct rhltable { ++ struct rhashtable ht; ++}; ++ ++struct cdev { ++ struct kobject kobj; ++ struct module *owner; ++ const struct file_operations *ops; ++ struct list_head list; ++ dev_t dev; ++ unsigned int count; ++}; ++ ++enum { ++ NETIF_F_SG_BIT = 0, ++ NETIF_F_IP_CSUM_BIT = 1, ++ __UNUSED_NETIF_F_1 = 2, ++ NETIF_F_HW_CSUM_BIT = 3, ++ NETIF_F_IPV6_CSUM_BIT = 4, ++ NETIF_F_HIGHDMA_BIT = 5, ++ NETIF_F_FRAGLIST_BIT = 6, ++ NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, ++ NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, ++ NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, ++ NETIF_F_VLAN_CHALLENGED_BIT = 10, ++ NETIF_F_GSO_BIT = 11, ++ NETIF_F_LLTX_BIT = 12, ++ NETIF_F_NETNS_LOCAL_BIT = 13, ++ NETIF_F_GRO_BIT = 14, ++ NETIF_F_LRO_BIT = 15, ++ NETIF_F_GSO_SHIFT = 16, ++ NETIF_F_TSO_BIT = 16, ++ NETIF_F_GSO_ROBUST_BIT = 17, ++ NETIF_F_TSO_ECN_BIT = 18, ++ NETIF_F_TSO_MANGLEID_BIT = 19, ++ NETIF_F_TSO6_BIT = 20, ++ NETIF_F_FSO_BIT = 21, ++ NETIF_F_GSO_GRE_BIT = 22, ++ NETIF_F_GSO_GRE_CSUM_BIT = 23, ++ NETIF_F_GSO_IPXIP4_BIT = 24, ++ NETIF_F_GSO_IPXIP6_BIT = 25, ++ NETIF_F_GSO_UDP_TUNNEL_BIT = 26, ++ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, ++ 
NETIF_F_GSO_PARTIAL_BIT = 28, ++ NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, ++ NETIF_F_GSO_SCTP_BIT = 30, ++ NETIF_F_GSO_ESP_BIT = 31, ++ NETIF_F_GSO_UDP_BIT = 32, ++ NETIF_F_GSO_UDP_L4_BIT = 33, ++ NETIF_F_GSO_LAST = 33, ++ NETIF_F_FCOE_CRC_BIT = 34, ++ NETIF_F_SCTP_CRC_BIT = 35, ++ NETIF_F_FCOE_MTU_BIT = 36, ++ NETIF_F_NTUPLE_BIT = 37, ++ NETIF_F_RXHASH_BIT = 38, ++ NETIF_F_RXCSUM_BIT = 39, ++ NETIF_F_NOCACHE_COPY_BIT = 40, ++ NETIF_F_LOOPBACK_BIT = 41, ++ NETIF_F_RXFCS_BIT = 42, ++ NETIF_F_RXALL_BIT = 43, ++ NETIF_F_HW_VLAN_STAG_TX_BIT = 44, ++ NETIF_F_HW_VLAN_STAG_RX_BIT = 45, ++ NETIF_F_HW_VLAN_STAG_FILTER_BIT = 46, ++ NETIF_F_HW_L2FW_DOFFLOAD_BIT = 47, ++ NETIF_F_HW_TC_BIT = 48, ++ NETIF_F_HW_ESP_BIT = 49, ++ NETIF_F_HW_ESP_TX_CSUM_BIT = 50, ++ NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 51, ++ NETIF_F_HW_TLS_TX_BIT = 52, ++ NETIF_F_HW_TLS_RX_BIT = 53, ++ NETIF_F_GRO_HW_BIT = 54, ++ NETIF_F_HW_TLS_RECORD_BIT = 55, ++ NETDEV_FEATURE_COUNT = 56, ++}; ++ ++struct sctphdr { ++ __be16 source; ++ __be16 dest; ++ __be32 vtag; ++ __le32 checksum; ++}; ++ ++struct sctp_mib { ++ long unsigned int mibs[34]; ++}; ++ ++struct ld_semaphore { ++ atomic_long_t count; ++ raw_spinlock_t wait_lock; ++ unsigned int wait_readers; ++ struct list_head read_wait; ++ struct list_head write_wait; ++}; ++ ++typedef unsigned int tcflag_t; ++ ++typedef unsigned char cc_t; ++ ++typedef unsigned int speed_t; ++ ++struct ktermios { ++ tcflag_t c_iflag; ++ tcflag_t c_oflag; ++ tcflag_t c_cflag; ++ tcflag_t c_lflag; ++ cc_t c_line; ++ cc_t c_cc[19]; ++ speed_t c_ispeed; ++ speed_t c_ospeed; ++}; ++ ++struct winsize { ++ short unsigned int ws_row; ++ short unsigned int ws_col; ++ short unsigned int ws_xpixel; ++ short unsigned int ws_ypixel; ++}; ++ ++struct tty_driver; ++ ++struct tty_operations; ++ ++struct tty_ldisc; ++ ++struct termiox; ++ ++struct tty_port; ++ ++struct tty_struct { ++ int magic; ++ struct kref kref; ++ struct device *dev; ++ struct tty_driver *driver; ++ const struct tty_operations *ops; ++ int index; ++ struct ld_semaphore ldisc_sem; ++ struct tty_ldisc *ldisc; ++ struct mutex atomic_write_lock; ++ struct mutex legacy_mutex; ++ struct mutex throttle_mutex; ++ struct rw_semaphore termios_rwsem; ++ struct mutex winsize_mutex; ++ spinlock_t ctrl_lock; ++ spinlock_t flow_lock; ++ struct ktermios termios; ++ struct ktermios termios_locked; ++ struct termiox *termiox; ++ char name[64]; ++ struct pid *pgrp; ++ struct pid *session; ++ long unsigned int flags; ++ int count; ++ struct winsize winsize; ++ long unsigned int stopped: 1; ++ long unsigned int flow_stopped: 1; ++ int: 30; ++ long unsigned int unused: 62; ++ int hw_stopped; ++ long unsigned int ctrl_status: 8; ++ long unsigned int packet: 1; ++ int: 23; ++ long unsigned int unused_ctrl: 55; ++ unsigned int receive_room; ++ int flow_change; ++ struct tty_struct *link; ++ struct fasync_struct *fasync; ++ wait_queue_head_t write_wait; ++ wait_queue_head_t read_wait; ++ struct work_struct hangup_work; ++ void *disc_data; ++ void *driver_data; ++ spinlock_t files_lock; ++ struct list_head tty_files; ++ int closing; ++ unsigned char *write_buf; ++ int write_cnt; ++ struct work_struct SAK_work; ++ struct tty_port *port; ++}; ++ ++struct termiox { ++ __u16 x_hflag; ++ __u16 x_cflag; ++ __u16 x_rflag[5]; ++ __u16 x_sflag; ++}; ++ ++struct serial_icounter_struct; ++ ++struct tty_operations { ++ struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); ++ int (*install)(struct tty_driver *, struct tty_struct *); ++ void (*remove)(struct tty_driver *, struct 
tty_struct *); ++ int (*open)(struct tty_struct *, struct file *); ++ void (*close)(struct tty_struct *, struct file *); ++ void (*shutdown)(struct tty_struct *); ++ void (*cleanup)(struct tty_struct *); ++ int (*write)(struct tty_struct *, const unsigned char *, int); ++ int (*put_char)(struct tty_struct *, unsigned char); ++ void (*flush_chars)(struct tty_struct *); ++ int (*write_room)(struct tty_struct *); ++ int (*chars_in_buffer)(struct tty_struct *); ++ int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); ++ long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); ++ void (*set_termios)(struct tty_struct *, struct ktermios *); ++ void (*throttle)(struct tty_struct *); ++ void (*unthrottle)(struct tty_struct *); ++ void (*stop)(struct tty_struct *); ++ void (*start)(struct tty_struct *); ++ void (*hangup)(struct tty_struct *); ++ int (*break_ctl)(struct tty_struct *, int); ++ void (*flush_buffer)(struct tty_struct *); ++ void (*set_ldisc)(struct tty_struct *); ++ void (*wait_until_sent)(struct tty_struct *, int); ++ void (*send_xchar)(struct tty_struct *, char); ++ int (*tiocmget)(struct tty_struct *); ++ int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); ++ int (*resize)(struct tty_struct *, struct winsize *); ++ int (*set_termiox)(struct tty_struct *, struct termiox *); ++ int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); ++ void (*show_fdinfo)(struct tty_struct *, struct seq_file *); ++ int (*poll_init)(struct tty_driver *, int, char *); ++ int (*poll_get_char)(struct tty_driver *, int); ++ void (*poll_put_char)(struct tty_driver *, int, char); ++ int (*proc_show)(struct seq_file *, void *); ++}; ++ ++struct tty_driver { ++ int magic; ++ struct kref kref; ++ struct cdev **cdevs; ++ struct module *owner; ++ const char *driver_name; ++ const char *name; ++ int name_base; ++ int major; ++ int minor_start; ++ unsigned int num; ++ short int type; ++ short int subtype; ++ struct ktermios init_termios; ++ long unsigned int flags; ++ struct proc_dir_entry *proc_entry; ++ struct tty_driver *other; ++ struct tty_struct **ttys; ++ struct tty_port **ports; ++ struct ktermios **termios; ++ void *driver_state; ++ const struct tty_operations *ops; ++ struct list_head tty_drivers; ++}; ++ ++struct tty_buffer { ++ union { ++ struct tty_buffer *next; ++ struct llist_node free; ++ }; ++ int used; ++ int size; ++ int commit; ++ int read; ++ int flags; ++ long unsigned int data[0]; ++}; ++ ++struct tty_bufhead { ++ struct tty_buffer *head; ++ struct work_struct work; ++ struct mutex lock; ++ atomic_t priority; ++ struct tty_buffer sentinel; ++ struct llist_head free; ++ atomic_t mem_used; ++ int mem_limit; ++ struct tty_buffer *tail; ++}; ++ ++struct tty_port_operations; ++ ++struct tty_port_client_operations; ++ ++struct tty_port { ++ struct tty_bufhead buf; ++ struct tty_struct *tty; ++ struct tty_struct *itty; ++ const struct tty_port_operations *ops; ++ const struct tty_port_client_operations *client_ops; ++ spinlock_t lock; ++ int blocked_open; ++ int count; ++ wait_queue_head_t open_wait; ++ wait_queue_head_t delta_msr_wait; ++ long unsigned int flags; ++ long unsigned int iflags; ++ unsigned char console: 1; ++ unsigned char low_latency: 1; ++ struct mutex mutex; ++ struct mutex buf_mutex; ++ unsigned char *xmit_buf; ++ unsigned int close_delay; ++ unsigned int closing_wait; ++ int drain_delay; ++ struct kref kref; ++ void *client_data; ++}; ++ ++struct tty_ldisc_ops { ++ int magic; ++ char *name; ++ int num; ++ int 
flags; ++ int (*open)(struct tty_struct *); ++ void (*close)(struct tty_struct *); ++ void (*flush_buffer)(struct tty_struct *); ++ ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t); ++ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t); ++ int (*ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); ++ long int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); ++ void (*set_termios)(struct tty_struct *, struct ktermios *); ++ __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); ++ int (*hangup)(struct tty_struct *); ++ void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int); ++ void (*write_wakeup)(struct tty_struct *); ++ void (*dcd_change)(struct tty_struct *, unsigned int); ++ int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int); ++ struct module *owner; ++ int refcount; ++}; ++ ++struct tty_ldisc { ++ struct tty_ldisc_ops *ops; ++ struct tty_struct *tty; ++}; ++ ++struct tty_port_operations { ++ int (*carrier_raised)(struct tty_port *); ++ void (*dtr_rts)(struct tty_port *, int); ++ void (*shutdown)(struct tty_port *); ++ int (*activate)(struct tty_port *, struct tty_struct *); ++ void (*destruct)(struct tty_port *); ++}; ++ ++struct tty_port_client_operations { ++ int (*receive_buf)(struct tty_port *, const unsigned char *, const unsigned char *, size_t); ++ void (*write_wakeup)(struct tty_port *); ++}; ++ ++enum { ++ SCTP_MAX_STREAM = 65535, ++}; ++ ++enum sctp_event_timeout { ++ SCTP_EVENT_TIMEOUT_NONE = 0, ++ SCTP_EVENT_TIMEOUT_T1_COOKIE = 1, ++ SCTP_EVENT_TIMEOUT_T1_INIT = 2, ++ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3, ++ SCTP_EVENT_TIMEOUT_T3_RTX = 4, ++ SCTP_EVENT_TIMEOUT_T4_RTO = 5, ++ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6, ++ SCTP_EVENT_TIMEOUT_HEARTBEAT = 7, ++ SCTP_EVENT_TIMEOUT_RECONF = 8, ++ SCTP_EVENT_TIMEOUT_SACK = 9, ++ SCTP_EVENT_TIMEOUT_AUTOCLOSE = 10, ++}; ++ ++enum { ++ SCTP_MAX_DUP_TSNS = 16, ++}; ++ ++enum { ++ SCTP_AUTH_HMAC_ID_RESERVED_0 = 0, ++ SCTP_AUTH_HMAC_ID_SHA1 = 1, ++ SCTP_AUTH_HMAC_ID_RESERVED_2 = 2, ++ SCTP_AUTH_HMAC_ID_SHA256 = 3, ++ __SCTP_AUTH_HMAC_MAX = 4, ++}; ++ ++struct sctp_bind_hashbucket { ++ spinlock_t lock; ++ struct hlist_head chain; ++}; ++ ++struct sctp_hashbucket { ++ rwlock_t lock; ++ struct hlist_head chain; ++}; ++ ++struct sctp_globals { ++ struct list_head address_families; ++ struct sctp_hashbucket *ep_hashtable; ++ struct sctp_bind_hashbucket *port_hashtable; ++ struct rhltable transport_hashtable; ++ int ep_hashsize; ++ int port_hashsize; ++ __u16 max_instreams; ++ __u16 max_outstreams; ++ bool checksum_disable; ++}; ++ ++enum { ++ SCTP_MIB_NUM = 0, ++ SCTP_MIB_CURRESTAB = 1, ++ SCTP_MIB_ACTIVEESTABS = 2, ++ SCTP_MIB_PASSIVEESTABS = 3, ++ SCTP_MIB_ABORTEDS = 4, ++ SCTP_MIB_SHUTDOWNS = 5, ++ SCTP_MIB_OUTOFBLUES = 6, ++ SCTP_MIB_CHECKSUMERRORS = 7, ++ SCTP_MIB_OUTCTRLCHUNKS = 8, ++ SCTP_MIB_OUTORDERCHUNKS = 9, ++ SCTP_MIB_OUTUNORDERCHUNKS = 10, ++ SCTP_MIB_INCTRLCHUNKS = 11, ++ SCTP_MIB_INORDERCHUNKS = 12, ++ SCTP_MIB_INUNORDERCHUNKS = 13, ++ SCTP_MIB_FRAGUSRMSGS = 14, ++ SCTP_MIB_REASMUSRMSGS = 15, ++ SCTP_MIB_OUTSCTPPACKS = 16, ++ SCTP_MIB_INSCTPPACKS = 17, ++ SCTP_MIB_T1_INIT_EXPIREDS = 18, ++ SCTP_MIB_T1_COOKIE_EXPIREDS = 19, ++ SCTP_MIB_T2_SHUTDOWN_EXPIREDS = 20, ++ SCTP_MIB_T3_RTX_EXPIREDS = 21, ++ SCTP_MIB_T4_RTO_EXPIREDS = 22, ++ SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS = 23, ++ SCTP_MIB_DELAY_SACK_EXPIREDS = 24, ++ 
SCTP_MIB_AUTOCLOSE_EXPIREDS = 25, ++ SCTP_MIB_T1_RETRANSMITS = 26, ++ SCTP_MIB_T3_RETRANSMITS = 27, ++ SCTP_MIB_PMTUD_RETRANSMITS = 28, ++ SCTP_MIB_FAST_RETRANSMITS = 29, ++ SCTP_MIB_IN_PKT_SOFTIRQ = 30, ++ SCTP_MIB_IN_PKT_BACKLOG = 31, ++ SCTP_MIB_IN_PKT_DISCARDS = 32, ++ SCTP_MIB_IN_DATA_CHUNK_DISCARDS = 33, ++ __SCTP_MIB_MAX = 34, ++}; ++ ++enum ipvs_sctp_event_t { ++ IP_VS_SCTP_DATA = 0, ++ IP_VS_SCTP_INIT = 1, ++ IP_VS_SCTP_INIT_ACK = 2, ++ IP_VS_SCTP_COOKIE_ECHO = 3, ++ IP_VS_SCTP_COOKIE_ACK = 4, ++ IP_VS_SCTP_SHUTDOWN = 5, ++ IP_VS_SCTP_SHUTDOWN_ACK = 6, ++ IP_VS_SCTP_SHUTDOWN_COMPLETE = 7, ++ IP_VS_SCTP_ERROR = 8, ++ IP_VS_SCTP_ABORT = 9, ++ IP_VS_SCTP_EVENT_LAST = 10, ++}; ++ ++struct nf_ct_event; ++ ++struct nf_ct_event_notifier { ++ int (*fcn)(unsigned int, struct nf_ct_event *); ++}; ++ ++struct nf_exp_event; ++ ++struct nf_exp_event_notifier { ++ int (*fcn)(unsigned int, struct nf_exp_event *); ++}; ++ ++struct nf_conntrack_tuple_mask { ++ struct { ++ union nf_inet_addr u3; ++ union nf_conntrack_man_proto u; ++ } src; ++}; ++ ++struct nf_conntrack_l4proto___2 { ++ u_int16_t l3proto; ++ u_int8_t l4proto; ++ bool allow_clash; ++ u16 nlattr_size; ++ bool (*pkt_to_tuple)(const struct sk_buff *, unsigned int, struct net *, struct nf_conntrack_tuple *); ++ bool (*invert_tuple)(struct nf_conntrack_tuple *, const struct nf_conntrack_tuple *); ++ int (*packet)(struct nf_conn *, const struct sk_buff *, unsigned int, enum ip_conntrack_info); ++ bool (*new)(struct nf_conn *, const struct sk_buff *, unsigned int); ++ void (*destroy)(struct nf_conn *); ++ int (*error)(struct net *, struct nf_conn *, struct sk_buff *, unsigned int, u_int8_t, unsigned int); ++ bool (*can_early_drop)(const struct nf_conn *); ++ int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *); ++ int (*from_nlattr)(struct nlattr **, struct nf_conn *); ++ int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); ++ unsigned int (*nlattr_tuple_size)(); ++ int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *); ++ const struct nla_policy *nla_policy; ++ struct { ++ int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); ++ int (*obj_to_nlattr)(struct sk_buff *, const void *); ++ u16 obj_size; ++ u16 nlattr_max; ++ const struct nla_policy *nla_policy; ++ } ctnl_timeout; ++ void (*print_conntrack)(struct seq_file *, struct nf_conn *); ++ unsigned int *net_id; ++ int (*init_net)(struct net *, u_int16_t); ++ struct nf_proto_net * (*get_net_proto)(struct net *); ++ struct module *me; ++}; ++ ++struct nf_ct_ext { ++ struct callback_head rcu; ++ u8 offset[9]; ++ u8 len; ++ char data[0]; ++}; ++ ++enum nf_ct_ext_id { ++ NF_CT_EXT_HELPER = 0, ++ NF_CT_EXT_NAT = 1, ++ NF_CT_EXT_SEQADJ = 2, ++ NF_CT_EXT_ACCT = 3, ++ NF_CT_EXT_ECACHE = 4, ++ NF_CT_EXT_TSTAMP = 5, ++ NF_CT_EXT_TIMEOUT = 6, ++ NF_CT_EXT_LABELS = 7, ++ NF_CT_EXT_SYNPROXY = 8, ++ NF_CT_EXT_NUM = 9, ++}; ++ ++struct nf_conntrack_helper; ++ ++struct nf_conntrack_expect { ++ struct hlist_node lnode; ++ struct hlist_node hnode; ++ struct nf_conntrack_tuple tuple; ++ struct nf_conntrack_tuple_mask mask; ++ void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); ++ struct nf_conntrack_helper *helper; ++ struct nf_conn *master; ++ struct timer_list timeout; ++ refcount_t use; ++ unsigned int flags; ++ unsigned int class; ++ union nf_inet_addr saved_addr; ++ union nf_conntrack_man_proto saved_proto; ++ enum ip_conntrack_dir dir; ++ struct callback_head rcu; ++}; ++ ++struct nf_conntrack_expect_policy; ++ ++struct 
nf_conntrack_helper { ++ struct hlist_node hnode; ++ char name[16]; ++ refcount_t refcnt; ++ struct module *me; ++ const struct nf_conntrack_expect_policy *expect_policy; ++ struct nf_conntrack_tuple tuple; ++ int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); ++ void (*destroy)(struct nf_conn *); ++ int (*from_nlattr)(struct nlattr *, struct nf_conn *); ++ int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); ++ unsigned int expect_class_max; ++ unsigned int flags; ++ unsigned int queue_num; ++ u16 data_len; ++}; ++ ++struct nf_conntrack_expect_policy { ++ unsigned int max_expected; ++ unsigned int timeout; ++ char name[16]; ++}; ++ ++struct nf_ct_event { ++ struct nf_conn *ct; ++ u32 portid; ++ int report; ++}; ++ ++struct nf_exp_event { ++ struct nf_conntrack_expect *exp; ++ u32 portid; ++ int report; ++}; ++ ++struct nf_ct_seqadj { ++ u32 correction_pos; ++ s32 offset_before; ++ s32 offset_after; ++}; ++ ++struct nf_conn_seqadj { ++ struct nf_ct_seqadj seq[2]; ++}; ++ ++typedef __u32 Elf32_Word; ++ ++struct elf32_note { ++ Elf32_Word n_namesz; ++ Elf32_Word n_descsz; ++ Elf32_Word n_type; ++}; ++ ++enum { ++ UNAME26 = 131072, ++ ADDR_NO_RANDOMIZE = 262144, ++ FDPIC_FUNCPTRS = 524288, ++ MMAP_PAGE_ZERO = 1048576, ++ ADDR_COMPAT_LAYOUT = 2097152, ++ READ_IMPLIES_EXEC = 4194304, ++ ADDR_LIMIT_32BIT = 8388608, ++ SHORT_INODE = 16777216, ++ WHOLE_SECONDS = 33554432, ++ STICKY_TIMEOUTS = 67108864, ++ ADDR_LIMIT_3GB = 134217728, ++}; ++ ++enum hrtimer_base_type { ++ HRTIMER_BASE_MONOTONIC = 0, ++ HRTIMER_BASE_REALTIME = 1, ++ HRTIMER_BASE_BOOTTIME = 2, ++ HRTIMER_BASE_TAI = 3, ++ HRTIMER_BASE_MONOTONIC_SOFT = 4, ++ HRTIMER_BASE_REALTIME_SOFT = 5, ++ HRTIMER_BASE_BOOTTIME_SOFT = 6, ++ HRTIMER_BASE_TAI_SOFT = 7, ++ HRTIMER_MAX_CLOCK_BASES = 8, ++}; ++ ++enum node_states { ++ N_POSSIBLE = 0, ++ N_ONLINE = 1, ++ N_NORMAL_MEMORY = 2, ++ N_HIGH_MEMORY = 2, ++ N_MEMORY = 3, ++ N_CPU = 4, ++ NR_NODE_STATES = 5, ++}; ++ ++enum { ++ MM_FILEPAGES = 0, ++ MM_ANONPAGES = 1, ++ MM_SWAPENTS = 2, ++ MM_SHMEMPAGES = 3, ++ NR_MM_COUNTERS = 4, ++}; ++ ++enum rseq_cs_flags_bit { ++ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, ++ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, ++ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, ++}; ++ ++enum perf_event_task_context { ++ perf_invalid_context = 4294967295, ++ perf_hw_context = 0, ++ perf_sw_context = 1, ++ perf_nr_task_contexts = 2, ++}; ++ ++enum rseq_event_mask_bits { ++ RSEQ_EVENT_PREEMPT_BIT = 0, ++ RSEQ_EVENT_SIGNAL_BIT = 1, ++ RSEQ_EVENT_MIGRATE_BIT = 2, ++}; ++ ++enum migratetype { ++ MIGRATE_UNMOVABLE = 0, ++ MIGRATE_MOVABLE = 1, ++ MIGRATE_RECLAIMABLE = 2, ++ MIGRATE_PCPTYPES = 3, ++ MIGRATE_HIGHATOMIC = 3, ++ MIGRATE_CMA = 4, ++ MIGRATE_ISOLATE = 5, ++ MIGRATE_TYPES = 6, ++}; ++ ++enum numa_stat_item { ++ NUMA_HIT = 0, ++ NUMA_MISS = 1, ++ NUMA_FOREIGN = 2, ++ NUMA_INTERLEAVE_HIT = 3, ++ NUMA_LOCAL = 4, ++ NUMA_OTHER = 5, ++ NR_VM_NUMA_STAT_ITEMS = 6, ++}; ++ ++enum zone_stat_item { ++ NR_FREE_PAGES = 0, ++ NR_ZONE_LRU_BASE = 1, ++ NR_ZONE_INACTIVE_ANON = 1, ++ NR_ZONE_ACTIVE_ANON = 2, ++ NR_ZONE_INACTIVE_FILE = 3, ++ NR_ZONE_ACTIVE_FILE = 4, ++ NR_ZONE_UNEVICTABLE = 5, ++ NR_ZONE_WRITE_PENDING = 6, ++ NR_MLOCK = 7, ++ NR_PAGETABLE = 8, ++ NR_KERNEL_STACK_KB = 9, ++ NR_BOUNCE = 10, ++ NR_ZSPAGES = 11, ++ NR_FREE_CMA_PAGES = 12, ++ NR_VM_ZONE_STAT_ITEMS = 13, ++}; ++ ++enum node_stat_item { ++ NR_LRU_BASE = 0, ++ NR_INACTIVE_ANON = 0, ++ NR_ACTIVE_ANON = 1, ++ NR_INACTIVE_FILE = 2, ++ NR_ACTIVE_FILE = 3, ++ NR_UNEVICTABLE = 
4, ++ NR_SLAB_RECLAIMABLE = 5, ++ NR_SLAB_UNRECLAIMABLE = 6, ++ NR_ISOLATED_ANON = 7, ++ NR_ISOLATED_FILE = 8, ++ WORKINGSET_REFAULT = 9, ++ WORKINGSET_ACTIVATE = 10, ++ WORKINGSET_NODERECLAIM = 11, ++ NR_ANON_MAPPED = 12, ++ NR_FILE_MAPPED = 13, ++ NR_FILE_PAGES = 14, ++ NR_FILE_DIRTY = 15, ++ NR_WRITEBACK = 16, ++ NR_WRITEBACK_TEMP = 17, ++ NR_SHMEM = 18, ++ NR_SHMEM_THPS = 19, ++ NR_SHMEM_PMDMAPPED = 20, ++ NR_ANON_THPS = 21, ++ NR_UNSTABLE_NFS = 22, ++ NR_VMSCAN_WRITE = 23, ++ NR_VMSCAN_IMMEDIATE = 24, ++ NR_DIRTIED = 25, ++ NR_WRITTEN = 26, ++ NR_INDIRECTLY_RECLAIMABLE_BYTES = 27, ++ NR_VM_NODE_STAT_ITEMS = 28, ++}; ++ ++enum lru_list { ++ LRU_INACTIVE_ANON = 0, ++ LRU_ACTIVE_ANON = 1, ++ LRU_INACTIVE_FILE = 2, ++ LRU_ACTIVE_FILE = 3, ++ LRU_UNEVICTABLE = 4, ++ NR_LRU_LISTS = 5, ++}; ++ ++enum zone_watermarks { ++ WMARK_MIN = 0, ++ WMARK_LOW = 1, ++ WMARK_HIGH = 2, ++ NR_WMARK = 3, ++}; ++ ++enum { ++ ZONELIST_FALLBACK = 0, ++ ZONELIST_NOFALLBACK = 1, ++ MAX_ZONELISTS = 2, ++}; ++ ++struct modversion_info { ++ long unsigned int crc; ++ char name[56]; ++}; ++ ++typedef int (*initcall_t)(); ++ ++struct obs_kernel_param { ++ const char *str; ++ int (*setup_func)(char *); ++ int early; ++}; ++ ++enum ftrace_dump_mode { ++ DUMP_NONE = 0, ++ DUMP_ALL = 1, ++ DUMP_ORIG = 2, ++}; ++ ++struct lockdep_map {}; ++ ++struct arch_hw_breakpoint_ctrl { ++ u32 __reserved: 19; ++ u32 len: 8; ++ u32 type: 2; ++ u32 privilege: 2; ++ u32 enabled: 1; ++}; ++ ++struct arch_hw_breakpoint { ++ u64 address; ++ u64 trigger; ++ struct arch_hw_breakpoint_ctrl ctrl; ++}; ++ ++enum perf_event_state { ++ PERF_EVENT_STATE_DEAD = 4294967292, ++ PERF_EVENT_STATE_EXIT = 4294967293, ++ PERF_EVENT_STATE_ERROR = 4294967294, ++ PERF_EVENT_STATE_OFF = 4294967295, ++ PERF_EVENT_STATE_INACTIVE = 0, ++ PERF_EVENT_STATE_ACTIVE = 1, ++}; ++ ++typedef struct { ++ atomic_long_t a; ++} local_t; ++ ++typedef struct { ++ local_t a; ++} local64_t; ++ ++struct perf_event_attr { ++ __u32 type; ++ __u32 size; ++ __u64 config; ++ union { ++ __u64 sample_period; ++ __u64 sample_freq; ++ }; ++ __u64 sample_type; ++ __u64 read_format; ++ __u64 disabled: 1; ++ __u64 inherit: 1; ++ __u64 pinned: 1; ++ __u64 exclusive: 1; ++ __u64 exclude_user: 1; ++ __u64 exclude_kernel: 1; ++ __u64 exclude_hv: 1; ++ __u64 exclude_idle: 1; ++ __u64 mmap: 1; ++ __u64 comm: 1; ++ __u64 freq: 1; ++ __u64 inherit_stat: 1; ++ __u64 enable_on_exec: 1; ++ __u64 task: 1; ++ __u64 watermark: 1; ++ __u64 precise_ip: 2; ++ __u64 mmap_data: 1; ++ __u64 sample_id_all: 1; ++ __u64 exclude_host: 1; ++ __u64 exclude_guest: 1; ++ __u64 exclude_callchain_kernel: 1; ++ __u64 exclude_callchain_user: 1; ++ __u64 mmap2: 1; ++ __u64 comm_exec: 1; ++ __u64 use_clockid: 1; ++ __u64 context_switch: 1; ++ __u64 write_backward: 1; ++ __u64 namespaces: 1; ++ __u64 __reserved_1: 35; ++ union { ++ __u32 wakeup_events; ++ __u32 wakeup_watermark; ++ }; ++ __u32 bp_type; ++ union { ++ __u64 bp_addr; ++ __u64 kprobe_func; ++ __u64 uprobe_path; ++ __u64 config1; ++ }; ++ union { ++ __u64 bp_len; ++ __u64 kprobe_addr; ++ __u64 probe_offset; ++ __u64 config2; ++ }; ++ __u64 branch_sample_type; ++ __u64 sample_regs_user; ++ __u32 sample_stack_user; ++ __s32 clockid; ++ __u64 sample_regs_intr; ++ __u32 aux_watermark; ++ __u16 sample_max_stack; ++ __u16 __reserved_2; ++}; ++ ++struct hw_perf_event_extra { ++ u64 config; ++ unsigned int reg; ++ int alloc; ++ int idx; ++}; ++ ++struct hw_perf_event { ++ union { ++ struct { ++ u64 config; ++ u64 last_tag; ++ long unsigned int config_base; ++ long 
unsigned int event_base; ++ int event_base_rdpmc; ++ int idx; ++ int last_cpu; ++ int flags; ++ struct hw_perf_event_extra extra_reg; ++ struct hw_perf_event_extra branch_reg; ++ }; ++ struct { ++ struct hrtimer hrtimer; ++ }; ++ struct { ++ struct list_head tp_list; ++ }; ++ struct { ++ u64 pwr_acc; ++ u64 ptsc; ++ }; ++ struct { ++ struct arch_hw_breakpoint info; ++ struct list_head bp_list; ++ }; ++ struct { ++ u8 iommu_bank; ++ u8 iommu_cntr; ++ u16 padding; ++ u64 conf; ++ u64 conf1; ++ }; ++ }; ++ struct task_struct *target; ++ void *addr_filters; ++ long unsigned int addr_filters_gen; ++ int state; ++ local64_t prev_count; ++ u64 sample_period; ++ u64 last_period; ++ local64_t period_left; ++ u64 interrupts_seq; ++ u64 interrupts; ++ u64 freq_time_stamp; ++ u64 freq_count_stamp; ++}; ++ ++struct irq_work { ++ long unsigned int flags; ++ struct llist_node llnode; ++ void (*func)(struct irq_work *); ++}; ++ ++struct perf_addr_filters_head { ++ struct list_head list; ++ raw_spinlock_t lock; ++ unsigned int nr_file_filters; ++}; ++ ++struct perf_sample_data; ++ ++typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); ++ ++struct ftrace_ops; ++ ++typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct pt_regs *); ++ ++struct ftrace_hash; ++ ++struct ftrace_ops_hash { ++ struct ftrace_hash *notrace_hash; ++ struct ftrace_hash *filter_hash; ++ struct mutex regex_lock; ++}; ++ ++struct ftrace_ops { ++ ftrace_func_t func; ++ struct ftrace_ops *next; ++ long unsigned int flags; ++ void *private; ++ ftrace_func_t saved_func; ++ struct ftrace_ops_hash local_hash; ++ struct ftrace_ops_hash *func_hash; ++ struct ftrace_ops_hash old_hash; ++ long unsigned int trampoline; ++ long unsigned int trampoline_size; ++}; ++ ++struct pmu; ++ ++struct ring_buffer; ++ ++struct perf_addr_filter_range; ++ ++struct event_filter; ++ ++struct perf_cgroup; ++ ++struct perf_event { ++ struct list_head event_entry; ++ struct list_head sibling_list; ++ struct list_head active_list; ++ struct rb_node group_node; ++ u64 group_index; ++ struct list_head migrate_entry; ++ struct hlist_node hlist_entry; ++ struct list_head active_entry; ++ int nr_siblings; ++ int event_caps; ++ int group_caps; ++ struct perf_event *group_leader; ++ struct pmu *pmu; ++ void *pmu_private; ++ enum perf_event_state state; ++ unsigned int attach_state; ++ local64_t count; ++ atomic64_t child_count; ++ u64 total_time_enabled; ++ u64 total_time_running; ++ u64 tstamp; ++ u64 shadow_ctx_time; ++ struct perf_event_attr attr; ++ u16 header_size; ++ u16 id_header_size; ++ u16 read_size; ++ struct hw_perf_event hw; ++ struct perf_event_context *ctx; ++ atomic_long_t refcount; ++ atomic64_t child_total_time_enabled; ++ atomic64_t child_total_time_running; ++ struct mutex child_mutex; ++ struct list_head child_list; ++ struct perf_event *parent; ++ int oncpu; ++ int cpu; ++ struct list_head owner_entry; ++ struct task_struct *owner; ++ struct mutex mmap_mutex; ++ atomic_t mmap_count; ++ struct ring_buffer *rb; ++ struct list_head rb_entry; ++ long unsigned int rcu_batches; ++ int rcu_pending; ++ wait_queue_head_t waitq; ++ struct fasync_struct *fasync; ++ int pending_wakeup; ++ int pending_kill; ++ int pending_disable; ++ struct irq_work pending; ++ atomic_t event_limit; ++ struct perf_addr_filters_head addr_filters; ++ struct perf_addr_filter_range *addr_filter_ranges; ++ long unsigned int addr_filters_gen; ++ void (*destroy)(struct perf_event *); ++ struct 
callback_head callback_head; ++ struct pid_namespace *ns; ++ u64 id; ++ u64 (*clock)(); ++ perf_overflow_handler_t overflow_handler; ++ void *overflow_handler_context; ++ perf_overflow_handler_t orig_overflow_handler; ++ struct bpf_prog *prog; ++ struct trace_event_call *tp_event; ++ struct event_filter *filter; ++ struct ftrace_ops ftrace_ops; ++ struct perf_cgroup *cgrp; ++ struct list_head sb_list; ++}; ++ ++enum clock_event_state { ++ CLOCK_EVT_STATE_DETACHED = 0, ++ CLOCK_EVT_STATE_SHUTDOWN = 1, ++ CLOCK_EVT_STATE_PERIODIC = 2, ++ CLOCK_EVT_STATE_ONESHOT = 3, ++ CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, ++}; ++ ++struct clock_event_device { ++ void (*event_handler)(struct clock_event_device *); ++ int (*set_next_event)(long unsigned int, struct clock_event_device *); ++ int (*set_next_ktime)(ktime_t, struct clock_event_device *); ++ ktime_t next_event; ++ u64 max_delta_ns; ++ u64 min_delta_ns; ++ u32 mult; ++ u32 shift; ++ enum clock_event_state state_use_accessors; ++ unsigned int features; ++ long unsigned int retries; ++ int (*set_state_periodic)(struct clock_event_device *); ++ int (*set_state_oneshot)(struct clock_event_device *); ++ int (*set_state_oneshot_stopped)(struct clock_event_device *); ++ int (*set_state_shutdown)(struct clock_event_device *); ++ int (*tick_resume)(struct clock_event_device *); ++ void (*broadcast)(const struct cpumask *); ++ void (*suspend)(struct clock_event_device *); ++ void (*resume)(struct clock_event_device *); ++ long unsigned int min_delta_ticks; ++ long unsigned int max_delta_ticks; ++ const char *name; ++ int rating; ++ int irq; ++ int bound_on; ++ const struct cpumask *cpumask; ++ struct list_head list; ++ struct module *owner; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct blk_plug { ++ struct list_head list; ++ struct list_head mq_list; ++ struct list_head cb_list; ++}; ++ ++struct perf_event_groups { ++ struct rb_root tree; ++ u64 index; ++}; ++ ++struct perf_event_context { ++ struct pmu *pmu; ++ raw_spinlock_t lock; ++ struct mutex mutex; ++ struct list_head active_ctx_list; ++ struct perf_event_groups pinned_groups; ++ struct perf_event_groups flexible_groups; ++ struct list_head event_list; ++ struct list_head pinned_active; ++ struct list_head flexible_active; ++ int nr_events; ++ int nr_active; ++ int is_active; ++ int nr_stat; ++ int nr_freq; ++ int rotate_disable; ++ atomic_t refcount; ++ struct task_struct *task; ++ u64 time; ++ u64 timestamp; ++ struct perf_event_context *parent_ctx; ++ u64 parent_gen; ++ u64 generation; ++ int pin_count; ++ int nr_cgroups; ++ void *task_ctx_data; ++ struct callback_head callback_head; ++}; ++ ++struct mempolicy { ++ atomic_t refcnt; ++ short unsigned int mode; ++ short unsigned int flags; ++ union { ++ short int preferred_node; ++ nodemask_t nodes; ++ } v; ++ union { ++ nodemask_t cpuset_mems_allowed; ++ nodemask_t user_nodemask; ++ } w; ++}; ++ ++struct task_delay_info { ++ raw_spinlock_t lock; ++ unsigned int flags; ++ u64 blkio_start; ++ u64 blkio_delay; ++ u64 swapin_delay; ++ u32 blkio_count; ++ u32 swapin_count; ++ u64 freepages_start; ++ u64 freepages_delay; ++ u32 freepages_count; ++}; ++ ++struct ftrace_ret_stack { ++ long unsigned int ret; ++ long unsigned int func; ++ long long unsigned int calltime; ++}; ++ ++struct request_list { ++ struct request_queue *q; ++ struct blkcg_gq *blkg; ++ int count[2]; ++ int starved[2]; ++ mempool_t *rq_pool; ++ wait_queue_head_t wait[2]; ++ unsigned int flags; ++}; ++ ++typedef void request_fn_proc(struct 
request_queue *); ++ ++typedef unsigned int blk_qc_t; ++ ++typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *); ++ ++typedef bool poll_q_fn(struct request_queue *, blk_qc_t); ++ ++struct request; ++ ++typedef int prep_rq_fn(struct request_queue *, struct request *); ++ ++typedef void unprep_rq_fn(struct request_queue *, struct request *); ++ ++typedef void softirq_done_fn(struct request *); ++ ++enum blk_eh_timer_return { ++ BLK_EH_DONE = 0, ++ BLK_EH_RESET_TIMER = 1, ++}; ++ ++typedef enum blk_eh_timer_return rq_timed_out_fn(struct request *); ++ ++typedef int dma_drain_needed_fn(struct request *); ++ ++typedef int lld_busy_fn(struct request_queue *); ++ ++typedef int init_rq_fn(struct request_queue *, struct request *, gfp_t); ++ ++typedef void exit_rq_fn(struct request_queue *, struct request *); ++ ++struct blk_integrity_profile; ++ ++struct blk_integrity { ++ const struct blk_integrity_profile *profile; ++ unsigned char flags; ++ unsigned char tuple_size; ++ unsigned char interval_exp; ++ unsigned char tag_size; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct blk_rq_stat { ++ u64 mean; ++ u64 min; ++ u64 max; ++ u32 nr_samples; ++ u64 batch; ++}; ++ ++enum blk_zoned_model { ++ BLK_ZONED_NONE = 0, ++ BLK_ZONED_HA = 1, ++ BLK_ZONED_HM = 2, ++}; ++ ++struct queue_limits { ++ long unsigned int bounce_pfn; ++ long unsigned int seg_boundary_mask; ++ long unsigned int virt_boundary_mask; ++ unsigned int max_hw_sectors; ++ unsigned int max_dev_sectors; ++ unsigned int chunk_sectors; ++ unsigned int max_sectors; ++ unsigned int max_segment_size; ++ unsigned int physical_block_size; ++ unsigned int alignment_offset; ++ unsigned int io_min; ++ unsigned int io_opt; ++ unsigned int max_discard_sectors; ++ unsigned int max_hw_discard_sectors; ++ unsigned int max_write_same_sectors; ++ unsigned int max_write_zeroes_sectors; ++ unsigned int discard_granularity; ++ unsigned int discard_alignment; ++ short unsigned int logical_block_size; ++ short unsigned int max_segments; ++ short unsigned int max_integrity_segments; ++ short unsigned int max_discard_segments; ++ unsigned char misaligned; ++ unsigned char discard_misaligned; ++ unsigned char cluster; ++ unsigned char raid_partial_stripes_expensive; ++ enum blk_zoned_model zoned; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct bsg_job; ++ ++typedef int bsg_job_fn(struct bsg_job *); ++ ++struct bsg_ops; ++ ++struct bsg_class_device { ++ struct device *class_dev; ++ int minor; ++ struct request_queue *queue; ++ const struct bsg_ops *ops; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct elevator_queue; ++ ++struct blk_queue_stats; ++ ++struct rq_qos; ++ ++struct blk_mq_ops; ++ ++struct blk_mq_ctx; ++ ++struct blk_mq_hw_ctx; ++ ++struct blk_queue_tag; ++ ++struct blk_stat_callback; ++ ++struct blk_trace; ++ ++struct blk_flush_queue; ++ ++struct throtl_data; ++ ++struct blk_mq_tag_set; ++ ++struct request_queue { ++ struct list_head queue_head; ++ struct request *last_merge; ++ struct elevator_queue *elevator; ++ int nr_rqs[2]; ++ int nr_rqs_elvpriv; ++ struct blk_queue_stats *stats; ++ struct rq_qos *rq_qos; ++ struct request_list root_rl; ++ request_fn_proc *request_fn; ++ make_request_fn 
*make_request_fn; ++ poll_q_fn *poll_fn; ++ prep_rq_fn *prep_rq_fn; ++ unprep_rq_fn *unprep_rq_fn; ++ softirq_done_fn *softirq_done_fn; ++ rq_timed_out_fn *rq_timed_out_fn; ++ dma_drain_needed_fn *dma_drain_needed; ++ lld_busy_fn *lld_busy_fn; ++ init_rq_fn *init_rq_fn; ++ exit_rq_fn *exit_rq_fn; ++ void (*initialize_rq_fn)(struct request *); ++ const struct blk_mq_ops *mq_ops; ++ unsigned int *mq_map; ++ struct blk_mq_ctx *queue_ctx; ++ unsigned int nr_queues; ++ unsigned int queue_depth; ++ struct blk_mq_hw_ctx **queue_hw_ctx; ++ unsigned int nr_hw_queues; ++ sector_t end_sector; ++ struct request *boundary_rq; ++ struct delayed_work delay_work; ++ struct backing_dev_info *backing_dev_info; ++ void *queuedata; ++ long unsigned int queue_flags; ++ atomic_t pm_only; ++ int id; ++ gfp_t bounce_gfp; ++ spinlock_t __queue_lock; ++ spinlock_t *queue_lock; ++ struct kobject kobj; ++ struct kobject *mq_kobj; ++ struct blk_integrity integrity; ++ struct device *dev; ++ int rpm_status; ++ unsigned int nr_pending; ++ long unsigned int nr_requests; ++ unsigned int nr_congestion_on; ++ unsigned int nr_congestion_off; ++ unsigned int nr_batching; ++ unsigned int dma_drain_size; ++ void *dma_drain_buffer; ++ unsigned int dma_pad_mask; ++ unsigned int dma_alignment; ++ struct blk_queue_tag *queue_tags; ++ unsigned int nr_sorted; ++ unsigned int in_flight[2]; ++ unsigned int request_fn_active; ++ unsigned int rq_timeout; ++ int poll_nsec; ++ struct blk_stat_callback *poll_cb; ++ struct blk_rq_stat poll_stat[16]; ++ struct timer_list timeout; ++ struct work_struct timeout_work; ++ struct list_head timeout_list; ++ struct list_head icq_list; ++ long unsigned int blkcg_pols[1]; ++ struct blkcg_gq *root_blkg; ++ struct list_head blkg_list; ++ struct queue_limits limits; ++ unsigned int nr_zones; ++ long unsigned int *seq_zones_bitmap; ++ long unsigned int *seq_zones_wlock; ++ unsigned int sg_timeout; ++ unsigned int sg_reserved_size; ++ int node; ++ struct blk_trace *blk_trace; ++ struct mutex blk_trace_mutex; ++ struct blk_flush_queue *fq; ++ struct list_head requeue_list; ++ spinlock_t requeue_lock; ++ struct delayed_work requeue_work; ++ struct mutex sysfs_lock; ++ struct list_head unused_hctx_list; ++ spinlock_t unused_hctx_lock; ++ int bypass_depth; ++ atomic_t mq_freeze_depth; ++ bsg_job_fn *bsg_job_fn; ++ struct bsg_class_device bsg_dev; ++ struct throtl_data *td; ++ struct callback_head callback_head; ++ wait_queue_head_t mq_freeze_wq; ++ struct percpu_ref q_usage_counter; ++ struct list_head all_q_node; ++ struct blk_mq_tag_set *tag_set; ++ struct list_head tag_set_list; ++ struct bio_set bio_split; ++ struct dentry *debugfs_dir; ++ struct dentry *sched_debugfs_dir; ++ bool mq_sysfs_init_done; ++ size_t cmd_size; ++ void *rq_alloc_data; ++ struct work_struct release_work; ++ u64 write_hints[5]; ++}; ++ ++struct rcu_segcblist { ++ struct callback_head *head; ++ struct callback_head **tails[4]; ++ long unsigned int gp_seq[4]; ++ long int len; ++ long int len_lazy; ++}; ++ ++struct srcu_node; ++ ++struct srcu_struct; ++ ++struct srcu_data { ++ long unsigned int srcu_lock_count[2]; ++ long unsigned int srcu_unlock_count[2]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t lock; ++ struct rcu_segcblist srcu_cblist; ++ long unsigned int srcu_gp_seq_needed; ++ long unsigned int srcu_gp_seq_needed_exp; ++ bool srcu_cblist_invoking; ++ struct delayed_work work; ++ struct callback_head srcu_barrier_head; ++ struct srcu_node *mynode; ++ long unsigned int grpmask; ++ int cpu; ++ struct 
srcu_struct *sp; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct srcu_node { ++ spinlock_t lock; ++ long unsigned int srcu_have_cbs[4]; ++ long unsigned int srcu_data_have_cbs[4]; ++ long unsigned int srcu_gp_seq_needed_exp; ++ struct srcu_node *srcu_parent; ++ int grplo; ++ int grphi; ++}; ++ ++struct srcu_struct { ++ struct srcu_node node[65]; ++ struct srcu_node *level[3]; ++ struct mutex srcu_cb_mutex; ++ spinlock_t lock; ++ struct mutex srcu_gp_mutex; ++ unsigned int srcu_idx; ++ long unsigned int srcu_gp_seq; ++ long unsigned int srcu_gp_seq_needed; ++ long unsigned int srcu_gp_seq_needed_exp; ++ long unsigned int srcu_last_gp_end; ++ struct srcu_data *sda; ++ long unsigned int srcu_barrier_seq; ++ struct mutex srcu_barrier_mutex; ++ struct completion srcu_barrier_completion; ++ atomic_t srcu_barrier_cpu_cnt; ++ struct delayed_work work; ++}; ++ ++enum umh_disable_depth { ++ UMH_ENABLED = 0, ++ UMH_FREEZING = 1, ++ UMH_DISABLED = 2, ++}; ++ ++struct trace_event_functions; ++ ++struct trace_event { ++ struct hlist_node node; ++ struct list_head list; ++ int type; ++ struct trace_event_functions *funcs; ++}; ++ ++struct trace_event_class; ++ ++struct trace_event_call { ++ struct list_head list; ++ struct trace_event_class *class; ++ union { ++ char *name; ++ struct tracepoint *tp; ++ }; ++ struct trace_event event; ++ char *print_fmt; ++ struct event_filter *filter; ++ void *mod; ++ void *data; ++ int flags; ++ int perf_refcount; ++ struct hlist_head *perf_events; ++ struct bpf_prog_array *prog_array; ++ int (*perf_perm)(struct trace_event_call *, struct perf_event *); ++}; ++ ++struct trace_eval_map { ++ const char *system; ++ const char *eval_string; ++ long unsigned int eval_value; ++}; ++ ++struct anon_vma { ++ struct anon_vma *root; ++ struct rw_semaphore rwsem; ++ atomic_t refcount; ++ unsigned int degree; ++ struct anon_vma *parent; ++ struct rb_root_cached rb_root; ++}; ++ ++struct linux_binprm; ++ ++struct coredump_params; ++ ++struct linux_binfmt { ++ struct list_head lh; ++ struct module *module; ++ int (*load_binary)(struct linux_binprm *); ++ int (*load_shlib)(struct file *); ++ int (*core_dump)(struct coredump_params *); ++ long unsigned int min_coredump; ++}; ++ ++struct partition_meta_info; ++ ++struct disk_stats; ++ ++struct hd_struct { ++ sector_t start_sect; ++ sector_t nr_sects; ++ seqcount_t nr_sects_seq; ++ sector_t alignment_offset; ++ unsigned int discard_alignment; ++ struct device __dev; ++ struct kobject *holder_dir; ++ int policy; ++ int partno; ++ struct partition_meta_info *info; ++ long unsigned int stamp; ++ atomic_t in_flight[2]; ++ struct disk_stats *dkstats; ++ struct percpu_ref ref; ++ struct gendisk *disk; ++ struct rcu_work rcu_work; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct disk_part_tbl; ++ ++struct block_device_operations; ++ ++struct timer_rand_state; ++ ++struct disk_events; ++ ++struct badblocks; ++ ++struct gendisk { ++ int major; ++ int first_minor; ++ int minors; ++ char disk_name[32]; ++ char * (*devnode)(struct gendisk *, umode_t *); ++ unsigned int events; ++ unsigned int async_events; ++ struct disk_part_tbl *part_tbl; ++ struct hd_struct part0; ++ const struct block_device_operations *fops; ++ struct request_queue *queue; ++ void *private_data; ++ int flags; ++ struct rw_semaphore lookup_sem; ++ struct kobject *slave_dir; ++ struct timer_rand_state *random; ++ atomic_t sync_io; ++ struct disk_events *ev; 
++ struct kobject integrity_kobj; ++ int node_id; ++ struct badblocks *bb; ++ struct lockdep_map lockdep_map; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct linux_binprm { ++ char buf[256]; ++ struct vm_area_struct *vma; ++ long unsigned int vma_pages; ++ struct mm_struct *mm; ++ long unsigned int p; ++ unsigned int called_set_creds: 1; ++ unsigned int cap_elevated: 1; ++ unsigned int secureexec: 1; ++ unsigned int recursion_depth; ++ struct file *file; ++ struct cred *cred; ++ int unsafe; ++ unsigned int per_clear; ++ int argc; ++ int envc; ++ const char *filename; ++ const char *interp; ++ unsigned int interp_flags; ++ unsigned int interp_data; ++ long unsigned int loader; ++ long unsigned int exec; ++ struct rlimit rlim_stack; ++}; ++ ++struct coredump_params { ++ const siginfo_t *siginfo; ++ struct pt_regs *regs; ++ struct file *file; ++ long unsigned int limit; ++ long unsigned int mm_flags; ++ loff_t written; ++ loff_t pos; ++}; ++ ++struct stack_trace { ++ unsigned int nr_entries; ++ unsigned int max_entries; ++ long unsigned int *entries; ++ int skip; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++enum vm_event_item { ++ PGPGIN = 0, ++ PGPGOUT = 1, ++ PSWPIN = 2, ++ PSWPOUT = 3, ++ PGALLOC_DMA32 = 4, ++ PGALLOC_NORMAL = 5, ++ PGALLOC_MOVABLE = 6, ++ ALLOCSTALL_DMA32 = 7, ++ ALLOCSTALL_NORMAL = 8, ++ ALLOCSTALL_MOVABLE = 9, ++ PGSCAN_SKIP_DMA32 = 10, ++ PGSCAN_SKIP_NORMAL = 11, ++ PGSCAN_SKIP_MOVABLE = 12, ++ PGFREE = 13, ++ PGACTIVATE = 14, ++ PGDEACTIVATE = 15, ++ PGLAZYFREE = 16, ++ PGFAULT = 17, ++ PGMAJFAULT = 18, ++ PGLAZYFREED = 19, ++ PGREFILL = 20, ++ PGSTEAL_KSWAPD = 21, ++ PGSTEAL_DIRECT = 22, ++ PGSCAN_KSWAPD = 23, ++ PGSCAN_DIRECT = 24, ++ PGSCAN_DIRECT_THROTTLE = 25, ++ PGSCAN_ZONE_RECLAIM_FAILED = 26, ++ PGINODESTEAL = 27, ++ SLABS_SCANNED = 28, ++ KSWAPD_INODESTEAL = 29, ++ KSWAPD_LOW_WMARK_HIT_QUICKLY = 30, ++ KSWAPD_HIGH_WMARK_HIT_QUICKLY = 31, ++ PAGEOUTRUN = 32, ++ PGROTATED = 33, ++ DROP_PAGECACHE = 34, ++ DROP_SLAB = 35, ++ OOM_KILL = 36, ++ NUMA_PTE_UPDATES = 37, ++ NUMA_HUGE_PTE_UPDATES = 38, ++ NUMA_HINT_FAULTS = 39, ++ NUMA_HINT_FAULTS_LOCAL = 40, ++ NUMA_PAGE_MIGRATE = 41, ++ PGMIGRATE_SUCCESS = 42, ++ PGMIGRATE_FAIL = 43, ++ COMPACTMIGRATE_SCANNED = 44, ++ COMPACTFREE_SCANNED = 45, ++ COMPACTISOLATED = 46, ++ COMPACTSTALL = 47, ++ COMPACTFAIL = 48, ++ COMPACTSUCCESS = 49, ++ KCOMPACTD_WAKE = 50, ++ KCOMPACTD_MIGRATE_SCANNED = 51, ++ KCOMPACTD_FREE_SCANNED = 52, ++ HTLB_BUDDY_PGALLOC = 53, ++ HTLB_BUDDY_PGALLOC_FAIL = 54, ++ UNEVICTABLE_PGCULLED = 55, ++ UNEVICTABLE_PGSCANNED = 56, ++ UNEVICTABLE_PGRESCUED = 57, ++ UNEVICTABLE_PGMLOCKED = 58, ++ UNEVICTABLE_PGMUNLOCKED = 59, ++ UNEVICTABLE_PGCLEARED = 60, ++ UNEVICTABLE_PGSTRANDED = 61, ++ THP_FAULT_ALLOC = 62, ++ THP_FAULT_FALLBACK = 63, ++ THP_COLLAPSE_ALLOC = 64, ++ THP_COLLAPSE_ALLOC_FAILED = 65, ++ THP_FILE_ALLOC = 66, ++ THP_FILE_MAPPED = 67, ++ THP_SPLIT_PAGE = 68, ++ THP_SPLIT_PAGE_FAILED = 69, ++ THP_DEFERRED_SPLIT_PAGE = 70, ++ THP_SPLIT_PMD = 71, ++ THP_ZERO_PAGE_ALLOC = 72, ++ THP_ZERO_PAGE_ALLOC_FAILED = 73, ++ THP_SWPOUT = 74, ++ THP_SWPOUT_FALLBACK = 75, ++ BALLOON_INFLATE = 76, ++ BALLOON_DEFLATE = 77, ++ BALLOON_MIGRATE = 78, ++ SWAP_RA = 79, ++ SWAP_RA_HIT = 80, ++ NR_VM_EVENT_ITEMS = 81, ++}; ++ ++struct ring_buffer_event { ++ u32 type_len: 5; ++ u32 time_delta: 27; ++ u32 array[0]; ++}; ++ ++struct seq_buf { ++ char *buffer; ++ size_t 
size; ++ size_t len; ++ loff_t readpos; ++}; ++ ++struct trace_seq { ++ unsigned char buffer[65536]; ++ struct seq_buf seq; ++ int full; ++}; ++ ++union perf_mem_data_src { ++ __u64 val; ++ struct { ++ __u64 mem_op: 5; ++ __u64 mem_lvl: 14; ++ __u64 mem_snoop: 5; ++ __u64 mem_lock: 2; ++ __u64 mem_dtlb: 7; ++ __u64 mem_lvl_num: 4; ++ __u64 mem_remote: 1; ++ __u64 mem_snoopx: 2; ++ __u64 mem_rsvd: 24; ++ }; ++}; ++ ++struct perf_branch_entry { ++ __u64 from; ++ __u64 to; ++ __u64 mispred: 1; ++ __u64 predicted: 1; ++ __u64 in_tx: 1; ++ __u64 abort: 1; ++ __u64 cycles: 16; ++ __u64 type: 4; ++ __u64 reserved: 40; ++}; ++ ++struct perf_guest_info_callbacks { ++ int (*is_in_guest)(); ++ int (*is_user_mode)(); ++ long unsigned int (*get_guest_ip)(); ++}; ++ ++struct new_utsname { ++ char sysname[65]; ++ char nodename[65]; ++ char release[65]; ++ char version[65]; ++ char machine[65]; ++ char domainname[65]; ++}; ++ ++struct uts_namespace { ++ struct kref kref; ++ struct new_utsname name; ++ struct user_namespace *user_ns; ++ struct ucounts *ucounts; ++ struct ns_common ns; ++}; ++ ++enum ftrace_tracing_type_t { ++ FTRACE_TYPE_ENTER = 0, ++ FTRACE_TYPE_RETURN = 1, ++}; ++ ++enum ftrace_bug_type { ++ FTRACE_BUG_UNKNOWN = 0, ++ FTRACE_BUG_INIT = 1, ++ FTRACE_BUG_NOP = 2, ++ FTRACE_BUG_CALL = 3, ++ FTRACE_BUG_UPDATE = 4, ++}; ++ ++struct ftrace_graph_ent { ++ long unsigned int func; ++ int depth; ++} __attribute__((packed)); ++ ++struct ftrace_graph_ret { ++ long unsigned int func; ++ long unsigned int overrun; ++ long long unsigned int calltime; ++ long long unsigned int rettime; ++ int depth; ++} __attribute__((packed)); ++ ++typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); ++ ++typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); ++ ++typedef u32 phandle; ++ ++struct property; ++ ++struct device_node { ++ const char *name; ++ const char *type; ++ phandle phandle; ++ const char *full_name; ++ struct fwnode_handle fwnode; ++ struct property *properties; ++ struct property *deadprops; ++ struct device_node *parent; ++ struct device_node *child; ++ struct device_node *sibling; ++ struct kobject kobj; ++ long unsigned int _flags; ++ void *data; ++}; ++ ++enum cpuhp_state { ++ CPUHP_INVALID = 4294967295, ++ CPUHP_OFFLINE = 0, ++ CPUHP_CREATE_THREADS = 1, ++ CPUHP_PERF_PREPARE = 2, ++ CPUHP_PERF_X86_PREPARE = 3, ++ CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, ++ CPUHP_PERF_POWER = 5, ++ CPUHP_PERF_SUPERH = 6, ++ CPUHP_X86_HPET_DEAD = 7, ++ CPUHP_X86_APB_DEAD = 8, ++ CPUHP_X86_MCE_DEAD = 9, ++ CPUHP_VIRT_NET_DEAD = 10, ++ CPUHP_SLUB_DEAD = 11, ++ CPUHP_MM_WRITEBACK_DEAD = 12, ++ CPUHP_MM_VMSTAT_DEAD = 13, ++ CPUHP_SOFTIRQ_DEAD = 14, ++ CPUHP_NET_MVNETA_DEAD = 15, ++ CPUHP_CPUIDLE_DEAD = 16, ++ CPUHP_ARM64_FPSIMD_DEAD = 17, ++ CPUHP_ARM_OMAP_WAKE_DEAD = 18, ++ CPUHP_IRQ_POLL_DEAD = 19, ++ CPUHP_BLOCK_SOFTIRQ_DEAD = 20, ++ CPUHP_ACPI_CPUDRV_DEAD = 21, ++ CPUHP_S390_PFAULT_DEAD = 22, ++ CPUHP_BLK_MQ_DEAD = 23, ++ CPUHP_FS_BUFF_DEAD = 24, ++ CPUHP_PRINTK_DEAD = 25, ++ CPUHP_MM_MEMCQ_DEAD = 26, ++ CPUHP_PERCPU_CNT_DEAD = 27, ++ CPUHP_RADIX_DEAD = 28, ++ CPUHP_PAGE_ALLOC_DEAD = 29, ++ CPUHP_NET_DEV_DEAD = 30, ++ CPUHP_PCI_XGENE_DEAD = 31, ++ CPUHP_IOMMU_INTEL_DEAD = 32, ++ CPUHP_LUSTRE_CFS_DEAD = 33, ++ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 34, ++ CPUHP_WORKQUEUE_PREP = 35, ++ CPUHP_POWER_NUMA_PREPARE = 36, ++ CPUHP_HRTIMERS_PREPARE = 37, ++ CPUHP_PROFILE_PREPARE = 38, ++ CPUHP_X2APIC_PREPARE = 39, ++ CPUHP_SMPCFD_PREPARE = 40, ++ CPUHP_RELAY_PREPARE = 41, ++ CPUHP_SLAB_PREPARE = 42, ++ 
CPUHP_MD_RAID5_PREPARE = 43, ++ CPUHP_RCUTREE_PREP = 44, ++ CPUHP_CPUIDLE_COUPLED_PREPARE = 45, ++ CPUHP_POWERPC_PMAC_PREPARE = 46, ++ CPUHP_POWERPC_MMU_CTX_PREPARE = 47, ++ CPUHP_XEN_PREPARE = 48, ++ CPUHP_XEN_EVTCHN_PREPARE = 49, ++ CPUHP_ARM_SHMOBILE_SCU_PREPARE = 50, ++ CPUHP_SH_SH3X_PREPARE = 51, ++ CPUHP_NET_FLOW_PREPARE = 52, ++ CPUHP_TOPOLOGY_PREPARE = 53, ++ CPUHP_NET_IUCV_PREPARE = 54, ++ CPUHP_ARM_BL_PREPARE = 55, ++ CPUHP_TRACE_RB_PREPARE = 56, ++ CPUHP_MM_ZS_PREPARE = 57, ++ CPUHP_MM_ZSWP_MEM_PREPARE = 58, ++ CPUHP_MM_ZSWP_POOL_PREPARE = 59, ++ CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, ++ CPUHP_ZCOMP_PREPARE = 61, ++ CPUHP_TIMERS_PREPARE = 62, ++ CPUHP_MIPS_SOC_PREPARE = 63, ++ CPUHP_BP_PREPARE_DYN = 64, ++ CPUHP_BP_PREPARE_DYN_END = 84, ++ CPUHP_BRINGUP_CPU = 85, ++ CPUHP_AP_IDLE_DEAD = 86, ++ CPUHP_AP_OFFLINE = 87, ++ CPUHP_AP_SCHED_STARTING = 88, ++ CPUHP_AP_RCUTREE_DYING = 89, ++ CPUHP_AP_IRQ_GIC_STARTING = 90, ++ CPUHP_AP_IRQ_HIP04_STARTING = 91, ++ CPUHP_AP_IRQ_ARMADA_XP_STARTING = 92, ++ CPUHP_AP_IRQ_BCM2836_STARTING = 93, ++ CPUHP_AP_IRQ_MIPS_GIC_STARTING = 94, ++ CPUHP_AP_ARM_MVEBU_COHERENCY = 95, ++ CPUHP_AP_MICROCODE_LOADER = 96, ++ CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 97, ++ CPUHP_AP_PERF_X86_STARTING = 98, ++ CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 99, ++ CPUHP_AP_PERF_X86_CQM_STARTING = 100, ++ CPUHP_AP_PERF_X86_CSTATE_STARTING = 101, ++ CPUHP_AP_PERF_XTENSA_STARTING = 102, ++ CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 103, ++ CPUHP_AP_ARM_SDEI_STARTING = 104, ++ CPUHP_AP_ARM_VFP_STARTING = 105, ++ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 106, ++ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 107, ++ CPUHP_AP_PERF_ARM_ACPI_STARTING = 108, ++ CPUHP_AP_PERF_ARM_STARTING = 109, ++ CPUHP_AP_ARM_L2X0_STARTING = 110, ++ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 111, ++ CPUHP_AP_ARM_ARCH_TIMER_STARTING = 112, ++ CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 113, ++ CPUHP_AP_JCORE_TIMER_STARTING = 114, ++ CPUHP_AP_ARM_TWD_STARTING = 115, ++ CPUHP_AP_QCOM_TIMER_STARTING = 116, ++ CPUHP_AP_ARMADA_TIMER_STARTING = 117, ++ CPUHP_AP_MARCO_TIMER_STARTING = 118, ++ CPUHP_AP_MIPS_GIC_TIMER_STARTING = 119, ++ CPUHP_AP_ARC_TIMER_STARTING = 120, ++ CPUHP_AP_RISCV_TIMER_STARTING = 121, ++ CPUHP_AP_KVM_STARTING = 122, ++ CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING = 123, ++ CPUHP_AP_KVM_ARM_VGIC_STARTING = 124, ++ CPUHP_AP_KVM_ARM_TIMER_STARTING = 125, ++ CPUHP_AP_DUMMY_TIMER_STARTING = 126, ++ CPUHP_AP_ARM_XEN_STARTING = 127, ++ CPUHP_AP_ARM_CORESIGHT_STARTING = 128, ++ CPUHP_AP_ARM64_ISNDEP_STARTING = 129, ++ CPUHP_AP_SMPCFD_DYING = 130, ++ CPUHP_AP_X86_TBOOT_DYING = 131, ++ CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 132, ++ CPUHP_AP_ONLINE = 133, ++ CPUHP_TEARDOWN_CPU = 134, ++ CPUHP_AP_ONLINE_IDLE = 135, ++ CPUHP_AP_SMPBOOT_THREADS = 136, ++ CPUHP_AP_X86_VDSO_VMA_ONLINE = 137, ++ CPUHP_AP_IRQ_AFFINITY_ONLINE = 138, ++ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 139, ++ CPUHP_AP_PERF_ONLINE = 140, ++ CPUHP_AP_PERF_X86_ONLINE = 141, ++ CPUHP_AP_PERF_X86_UNCORE_ONLINE = 142, ++ CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 143, ++ CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 144, ++ CPUHP_AP_PERF_X86_RAPL_ONLINE = 145, ++ CPUHP_AP_PERF_X86_CQM_ONLINE = 146, ++ CPUHP_AP_PERF_X86_CSTATE_ONLINE = 147, ++ CPUHP_AP_PERF_S390_CF_ONLINE = 148, ++ CPUHP_AP_PERF_S390_SF_ONLINE = 149, ++ CPUHP_AP_PERF_ARM_CCI_ONLINE = 150, ++ CPUHP_AP_PERF_ARM_CCN_ONLINE = 151, ++ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 152, ++ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 153, ++ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 154, ++ CPUHP_AP_PERF_ARM_L2X0_ONLINE = 155, ++ 
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 156, ++ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 157, ++ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 158, ++ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 159, ++ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 160, ++ CPUHP_AP_WATCHDOG_ONLINE = 161, ++ CPUHP_AP_WORKQUEUE_ONLINE = 162, ++ CPUHP_AP_RCUTREE_ONLINE = 163, ++ CPUHP_AP_BASE_CACHEINFO_ONLINE = 164, ++ CPUHP_AP_ONLINE_DYN = 165, ++ CPUHP_AP_ONLINE_DYN_END = 195, ++ CPUHP_AP_X86_HPET_ONLINE = 196, ++ CPUHP_AP_X86_KVM_CLK_ONLINE = 197, ++ CPUHP_AP_ACTIVE = 198, ++ CPUHP_ONLINE = 199, ++}; ++ ++struct perf_regs { ++ __u64 abi; ++ struct pt_regs *regs; ++}; ++ ++struct perf_callchain_entry { ++ __u64 nr; ++ __u64 ip[0]; ++}; ++ ++typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); ++ ++struct perf_raw_frag { ++ union { ++ struct perf_raw_frag *next; ++ long unsigned int pad; ++ }; ++ perf_copy_f copy; ++ void *data; ++ u32 size; ++} __attribute__((packed)); ++ ++struct perf_raw_record { ++ struct perf_raw_frag frag; ++ u32 size; ++}; ++ ++struct perf_branch_stack { ++ __u64 nr; ++ struct perf_branch_entry entries[0]; ++}; ++ ++struct perf_cpu_context; ++ ++struct pmu { ++ struct list_head entry; ++ struct module *module; ++ struct device *dev; ++ const struct attribute_group **attr_groups; ++ const char *name; ++ int type; ++ int capabilities; ++ int *pmu_disable_count; ++ struct perf_cpu_context *pmu_cpu_context; ++ atomic_t exclusive_cnt; ++ int task_ctx_nr; ++ int hrtimer_interval_ms; ++ unsigned int nr_addr_filters; ++ void (*pmu_enable)(struct pmu *); ++ void (*pmu_disable)(struct pmu *); ++ int (*event_init)(struct perf_event *); ++ void (*event_mapped)(struct perf_event *, struct mm_struct *); ++ void (*event_unmapped)(struct perf_event *, struct mm_struct *); ++ int (*add)(struct perf_event *, int); ++ void (*del)(struct perf_event *, int); ++ void (*start)(struct perf_event *, int); ++ void (*stop)(struct perf_event *, int); ++ void (*read)(struct perf_event *); ++ void (*start_txn)(struct pmu *, unsigned int); ++ int (*commit_txn)(struct pmu *); ++ void (*cancel_txn)(struct pmu *); ++ int (*event_idx)(struct perf_event *); ++ void (*sched_task)(struct perf_event_context *, bool); ++ size_t task_ctx_size; ++ void * (*setup_aux)(struct perf_event *, void **, int, bool); ++ void (*free_aux)(void *); ++ int (*addr_filters_validate)(struct list_head *); ++ void (*addr_filters_sync)(struct perf_event *); ++ int (*filter_match)(struct perf_event *); ++ int (*check_period)(struct perf_event *, u64); ++}; ++ ++struct perf_cpu_context { ++ struct perf_event_context ctx; ++ struct perf_event_context *task_ctx; ++ int active_oncpu; ++ int exclusive; ++ raw_spinlock_t hrtimer_lock; ++ struct hrtimer hrtimer; ++ ktime_t hrtimer_interval; ++ unsigned int hrtimer_active; ++ struct perf_cgroup *cgrp; ++ struct list_head cgrp_cpuctx_entry; ++ struct list_head sched_cb_entry; ++ int sched_cb_usage; ++ int online; ++}; ++ ++struct perf_addr_filter_range { ++ long unsigned int start; ++ long unsigned int size; ++}; ++ ++struct perf_sample_data { ++ u64 addr; ++ struct perf_raw_record *raw; ++ struct perf_branch_stack *br_stack; ++ u64 period; ++ u64 weight; ++ u64 txn; ++ union perf_mem_data_src data_src; ++ u64 type; ++ u64 ip; ++ struct { ++ u32 pid; ++ u32 tid; ++ } tid_entry; ++ u64 time; ++ u64 id; ++ u64 stream_id; ++ struct { ++ u32 cpu; ++ u32 reserved; ++ } cpu_entry; ++ struct perf_callchain_entry *callchain; ++ struct perf_regs regs_user; ++ struct pt_regs regs_user_copy; 
++ struct perf_regs regs_intr; ++ u64 stack_user_size; ++ u64 phys_addr; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct perf_cgroup_info; ++ ++struct perf_cgroup { ++ struct cgroup_subsys_state css; ++ struct perf_cgroup_info *info; ++}; ++ ++struct perf_cgroup_info { ++ u64 time; ++ u64 timestamp; ++}; ++ ++struct trace_entry { ++ short unsigned int type; ++ unsigned char flags; ++ unsigned char preempt_count; ++ int pid; ++}; ++ ++struct trace_array; ++ ++struct tracer; ++ ++struct trace_buffer; ++ ++struct ring_buffer_iter; ++ ++struct trace_iterator { ++ struct trace_array *tr; ++ struct tracer *trace; ++ struct trace_buffer *trace_buffer; ++ void *private; ++ int cpu_file; ++ struct mutex mutex; ++ struct ring_buffer_iter **buffer_iter; ++ long unsigned int iter_flags; ++ struct trace_seq tmp_seq; ++ cpumask_var_t started; ++ bool snapshot; ++ struct trace_seq seq; ++ struct trace_entry *ent; ++ long unsigned int lost_events; ++ int leftover; ++ int ent_size; ++ int cpu; ++ u64 ts; ++ loff_t pos; ++ long int idx; ++}; ++ ++enum print_line_t { ++ TRACE_TYPE_PARTIAL_LINE = 0, ++ TRACE_TYPE_HANDLED = 1, ++ TRACE_TYPE_UNHANDLED = 2, ++ TRACE_TYPE_NO_CONSUME = 3, ++}; ++ ++typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); ++ ++struct trace_event_functions { ++ trace_print_func trace; ++ trace_print_func raw; ++ trace_print_func hex; ++ trace_print_func binary; ++}; ++ ++enum trace_reg { ++ TRACE_REG_REGISTER = 0, ++ TRACE_REG_UNREGISTER = 1, ++ TRACE_REG_PERF_REGISTER = 2, ++ TRACE_REG_PERF_UNREGISTER = 3, ++ TRACE_REG_PERF_OPEN = 4, ++ TRACE_REG_PERF_CLOSE = 5, ++ TRACE_REG_PERF_ADD = 6, ++ TRACE_REG_PERF_DEL = 7, ++}; ++ ++struct trace_event_class { ++ const char *system; ++ void *probe; ++ void *perf_probe; ++ int (*reg)(struct trace_event_call *, enum trace_reg, void *); ++ int (*define_fields)(struct trace_event_call *); ++ struct list_head * (*get_fields)(struct trace_event_call *); ++ struct list_head fields; ++ int (*raw_init)(struct trace_event_call *); ++}; ++ ++struct trace_event_file; ++ ++struct trace_event_buffer { ++ struct ring_buffer *buffer; ++ struct ring_buffer_event *event; ++ struct trace_event_file *trace_file; ++ void *entry; ++ long unsigned int flags; ++ int pc; ++}; ++ ++struct trace_subsystem_dir; ++ ++struct trace_event_file { ++ struct list_head list; ++ struct trace_event_call *event_call; ++ struct event_filter *filter; ++ struct dentry *dir; ++ struct trace_array *tr; ++ struct trace_subsystem_dir *system; ++ struct list_head triggers; ++ long unsigned int flags; ++ atomic_t sm_ref; ++ atomic_t tm_ref; ++}; ++ ++enum { ++ TRACE_EVENT_FL_FILTERED = 1, ++ TRACE_EVENT_FL_CAP_ANY = 2, ++ TRACE_EVENT_FL_NO_SET_FILTER = 4, ++ TRACE_EVENT_FL_IGNORE_ENABLE = 8, ++ TRACE_EVENT_FL_TRACEPOINT = 16, ++ TRACE_EVENT_FL_KPROBE = 32, ++ TRACE_EVENT_FL_UPROBE = 64, ++}; ++ ++enum { ++ EVENT_FILE_FL_ENABLED = 1, ++ EVENT_FILE_FL_RECORDED_CMD = 2, ++ EVENT_FILE_FL_RECORDED_TGID = 4, ++ EVENT_FILE_FL_FILTERED = 8, ++ EVENT_FILE_FL_NO_SET_FILTER = 16, ++ EVENT_FILE_FL_SOFT_MODE = 32, ++ EVENT_FILE_FL_SOFT_DISABLED = 64, ++ EVENT_FILE_FL_TRIGGER_MODE = 128, ++ EVENT_FILE_FL_TRIGGER_COND = 256, ++ EVENT_FILE_FL_PID_FILTER = 512, ++ EVENT_FILE_FL_WAS_ENABLED = 1024, ++}; ++ ++enum { ++ FILTER_OTHER = 0, ++ FILTER_STATIC_STRING = 1, ++ FILTER_DYN_STRING = 2, ++ FILTER_PTR_STRING = 3, ++ FILTER_TRACE_FN = 4, ++ FILTER_COMM = 5, ++ FILTER_CPU = 6, ++}; ++ ++enum memblock_flags { ++ MEMBLOCK_NONE = 0, ++ MEMBLOCK_HOTPLUG = 1, 
++ MEMBLOCK_MIRROR = 2, ++ MEMBLOCK_NOMAP = 4, ++}; ++ ++struct memblock_region { ++ phys_addr_t base; ++ phys_addr_t size; ++ enum memblock_flags flags; ++ int nid; ++}; ++ ++struct memblock_type { ++ long unsigned int cnt; ++ long unsigned int max; ++ phys_addr_t total_size; ++ struct memblock_region *regions; ++ char *name; ++}; ++ ++struct memblock { ++ bool bottom_up; ++ phys_addr_t current_limit; ++ struct memblock_type memory; ++ struct memblock_type reserved; ++}; ++ ++struct acpi_table_header { ++ char signature[4]; ++ u32 length; ++ u8 revision; ++ u8 checksum; ++ char oem_id[6]; ++ char oem_table_id[8]; ++ u32 oem_revision; ++ char asl_compiler_id[4]; ++ u32 asl_compiler_revision; ++}; ++ ++struct acpi_generic_address { ++ u8 space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 access_width; ++ u64 address; ++} __attribute__((packed)); ++ ++struct acpi_table_fadt { ++ struct acpi_table_header header; ++ u32 facs; ++ u32 dsdt; ++ u8 model; ++ u8 preferred_profile; ++ u16 sci_interrupt; ++ u32 smi_command; ++ u8 acpi_enable; ++ u8 acpi_disable; ++ u8 s4_bios_request; ++ u8 pstate_control; ++ u32 pm1a_event_block; ++ u32 pm1b_event_block; ++ u32 pm1a_control_block; ++ u32 pm1b_control_block; ++ u32 pm2_control_block; ++ u32 pm_timer_block; ++ u32 gpe0_block; ++ u32 gpe1_block; ++ u8 pm1_event_length; ++ u8 pm1_control_length; ++ u8 pm2_control_length; ++ u8 pm_timer_length; ++ u8 gpe0_block_length; ++ u8 gpe1_block_length; ++ u8 gpe1_base; ++ u8 cst_control; ++ u16 c2_latency; ++ u16 c3_latency; ++ u16 flush_size; ++ u16 flush_stride; ++ u8 duty_offset; ++ u8 duty_width; ++ u8 day_alarm; ++ u8 month_alarm; ++ u8 century; ++ u16 boot_flags; ++ u8 reserved; ++ u32 flags; ++ struct acpi_generic_address reset_register; ++ u8 reset_value; ++ u16 arm_boot_flags; ++ u8 minor_revision; ++ u64 Xfacs; ++ u64 Xdsdt; ++ struct acpi_generic_address xpm1a_event_block; ++ struct acpi_generic_address xpm1b_event_block; ++ struct acpi_generic_address xpm1a_control_block; ++ struct acpi_generic_address xpm1b_control_block; ++ struct acpi_generic_address xpm2_control_block; ++ struct acpi_generic_address xpm_timer_block; ++ struct acpi_generic_address xgpe0_block; ++ struct acpi_generic_address xgpe1_block; ++ struct acpi_generic_address sleep_control; ++ struct acpi_generic_address sleep_status; ++ u64 hypervisor_id; ++} __attribute__((packed)); ++ ++enum reboot_mode { ++ REBOOT_COLD = 0, ++ REBOOT_WARM = 1, ++ REBOOT_HARD = 2, ++ REBOOT_SOFT = 3, ++ REBOOT_GPIO = 4, ++}; ++ ++enum reboot_type { ++ BOOT_TRIPLE = 116, ++ BOOT_KBD = 107, ++ BOOT_BIOS = 98, ++ BOOT_ACPI = 97, ++ BOOT_EFI = 101, ++ BOOT_CF9_FORCE = 112, ++ BOOT_CF9_SAFE = 113, ++}; ++ ++struct screen_info { ++ __u8 orig_x; ++ __u8 orig_y; ++ __u16 ext_mem_k; ++ __u16 orig_video_page; ++ __u8 orig_video_mode; ++ __u8 orig_video_cols; ++ __u8 flags; ++ __u8 unused2; ++ __u16 orig_video_ega_bx; ++ __u16 unused3; ++ __u8 orig_video_lines; ++ __u8 orig_video_isVGA; ++ __u16 orig_video_points; ++ __u16 lfb_width; ++ __u16 lfb_height; ++ __u16 lfb_depth; ++ __u32 lfb_base; ++ __u32 lfb_size; ++ __u16 cl_magic; ++ __u16 cl_offset; ++ __u16 lfb_linelength; ++ __u8 red_size; ++ __u8 red_pos; ++ __u8 green_size; ++ __u8 green_pos; ++ __u8 blue_size; ++ __u8 blue_pos; ++ __u8 rsvd_size; ++ __u8 rsvd_pos; ++ __u16 vesapm_seg; ++ __u16 vesapm_off; ++ __u16 pages; ++ __u16 vesa_attributes; ++ __u32 capabilities; ++ __u32 ext_lfb_base; ++ __u8 _reserved[2]; ++} __attribute__((packed)); ++ ++typedef long unsigned int efi_status_t; ++ ++typedef u8 
efi_bool_t; ++ ++typedef u16 efi_char16_t; ++ ++typedef u64 efi_physical_addr_t; ++ ++typedef void *efi_handle_t; ++ ++typedef guid_t efi_guid_t; ++ ++typedef struct { ++ u64 signature; ++ u32 revision; ++ u32 headersize; ++ u32 crc32; ++ u32 reserved; ++} efi_table_hdr_t; ++ ++typedef struct { ++ u32 type; ++ u32 pad; ++ u64 phys_addr; ++ u64 virt_addr; ++ u64 num_pages; ++ u64 attribute; ++} efi_memory_desc_t; ++ ++typedef struct { ++ efi_guid_t guid; ++ u32 headersize; ++ u32 flags; ++ u32 imagesize; ++} efi_capsule_header_t; ++ ++typedef struct { ++ u16 year; ++ u8 month; ++ u8 day; ++ u8 hour; ++ u8 minute; ++ u8 second; ++ u8 pad1; ++ u32 nanosecond; ++ s16 timezone; ++ u8 daylight; ++ u8 pad2; ++} efi_time_t; ++ ++typedef struct { ++ u32 resolution; ++ u32 accuracy; ++ u8 sets_to_zero; ++} efi_time_cap_t; ++ ++typedef struct { ++ efi_table_hdr_t hdr; ++ void *raise_tpl; ++ void *restore_tpl; ++ efi_status_t (*allocate_pages)(int, int, long unsigned int, efi_physical_addr_t *); ++ efi_status_t (*free_pages)(efi_physical_addr_t, long unsigned int); ++ efi_status_t (*get_memory_map)(long unsigned int *, void *, long unsigned int *, long unsigned int *, u32 *); ++ efi_status_t (*allocate_pool)(int, long unsigned int, void **); ++ efi_status_t (*free_pool)(void *); ++ void *create_event; ++ void *set_timer; ++ void *wait_for_event; ++ void *signal_event; ++ void *close_event; ++ void *check_event; ++ void *install_protocol_interface; ++ void *reinstall_protocol_interface; ++ void *uninstall_protocol_interface; ++ efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); ++ void *__reserved; ++ void *register_protocol_notify; ++ efi_status_t (*locate_handle)(int, efi_guid_t *, void *, long unsigned int *, efi_handle_t *); ++ void *locate_device_path; ++ efi_status_t (*install_configuration_table)(efi_guid_t *, void *); ++ void *load_image; ++ void *start_image; ++ void *exit; ++ void *unload_image; ++ efi_status_t (*exit_boot_services)(efi_handle_t, long unsigned int); ++ void *get_next_monotonic_count; ++ void *stall; ++ void *set_watchdog_timer; ++ void *connect_controller; ++ void *disconnect_controller; ++ void *open_protocol; ++ void *close_protocol; ++ void *open_protocol_information; ++ void *protocols_per_handle; ++ void *locate_handle_buffer; ++ efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); ++ void *install_multiple_protocol_interfaces; ++ void *uninstall_multiple_protocol_interfaces; ++ void *calculate_crc32; ++ void *copy_mem; ++ void *set_mem; ++ void *create_event_ex; ++} efi_boot_services_t; ++ ++typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); ++ ++typedef efi_status_t efi_set_time_t(efi_time_t *); ++ ++typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); ++ ++typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); ++ ++typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); ++ ++typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *); ++ ++typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); ++ ++typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); ++ ++typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *); ++ ++typedef efi_status_t efi_set_virtual_address_map_t(long unsigned int, long unsigned int, u32, efi_memory_desc_t *); ++ ++typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 
*); ++ ++typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int); ++ ++typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *); ++ ++typedef struct { ++ efi_table_hdr_t hdr; ++ efi_get_time_t *get_time; ++ efi_set_time_t *set_time; ++ efi_get_wakeup_time_t *get_wakeup_time; ++ efi_set_wakeup_time_t *set_wakeup_time; ++ efi_set_virtual_address_map_t *set_virtual_address_map; ++ void *convert_pointer; ++ efi_get_variable_t *get_variable; ++ efi_get_next_variable_t *get_next_variable; ++ efi_set_variable_t *set_variable; ++ efi_get_next_high_mono_count_t *get_next_high_mono_count; ++ efi_reset_system_t *reset_system; ++ efi_update_capsule_t *update_capsule; ++ efi_query_capsule_caps_t *query_capsule_caps; ++ efi_query_variable_info_t *query_variable_info; ++} efi_runtime_services_t; ++ ++typedef struct { ++ efi_table_hdr_t hdr; ++ long unsigned int fw_vendor; ++ u32 fw_revision; ++ long unsigned int con_in_handle; ++ long unsigned int con_in; ++ long unsigned int con_out_handle; ++ long unsigned int con_out; ++ long unsigned int stderr_handle; ++ long unsigned int stderr; ++ efi_runtime_services_t *runtime; ++ efi_boot_services_t *boottime; ++ long unsigned int nr_tables; ++ long unsigned int tables; ++} efi_system_table_t; ++ ++struct efi_memory_map { ++ phys_addr_t phys_map; ++ void *map; ++ void *map_end; ++ int nr_map; ++ long unsigned int desc_version; ++ long unsigned int desc_size; ++ bool late; ++}; ++ ++struct efi { ++ efi_system_table_t *systab; ++ unsigned int runtime_version; ++ long unsigned int mps; ++ long unsigned int acpi; ++ long unsigned int acpi20; ++ long unsigned int smbios; ++ long unsigned int smbios3; ++ long unsigned int sal_systab; ++ long unsigned int boot_info; ++ long unsigned int hcdp; ++ long unsigned int uga; ++ long unsigned int uv_systab; ++ long unsigned int fw_vendor; ++ long unsigned int runtime; ++ long unsigned int config_table; ++ long unsigned int esrt; ++ long unsigned int properties_table; ++ long unsigned int mem_attr_table; ++ long unsigned int rng_seed; ++ long unsigned int tpm_log; ++ long unsigned int mem_reserve; ++ efi_get_time_t *get_time; ++ efi_set_time_t *set_time; ++ efi_get_wakeup_time_t *get_wakeup_time; ++ efi_set_wakeup_time_t *set_wakeup_time; ++ efi_get_variable_t *get_variable; ++ efi_get_next_variable_t *get_next_variable; ++ efi_set_variable_t *set_variable; ++ efi_set_variable_t *set_variable_nonblocking; ++ efi_query_variable_info_t *query_variable_info; ++ efi_query_variable_info_t *query_variable_info_nonblocking; ++ efi_update_capsule_t *update_capsule; ++ efi_query_capsule_caps_t *query_capsule_caps; ++ efi_get_next_high_mono_count_t *get_next_high_mono_count; ++ efi_reset_system_t *reset_system; ++ efi_set_virtual_address_map_t *set_virtual_address_map; ++ struct efi_memory_map memmap; ++ long unsigned int flags; ++}; ++ ++enum psci_conduit { ++ PSCI_CONDUIT_NONE = 0, ++ PSCI_CONDUIT_SMC = 1, ++ PSCI_CONDUIT_HVC = 2, ++}; ++ ++enum smccc_version { ++ SMCCC_VERSION_1_0 = 0, ++ SMCCC_VERSION_1_1 = 1, ++}; ++ ++struct psci_operations { ++ u32 (*get_version)(); ++ int (*cpu_suspend)(u32, long unsigned int); ++ int (*cpu_off)(u32); ++ int (*cpu_on)(long unsigned int, long unsigned int); ++ int (*migrate)(long unsigned int); ++ int (*affinity_info)(long unsigned int, long unsigned int); ++ int (*migrate_info_type)(); ++ enum psci_conduit conduit; ++ enum smccc_version smccc_version; ++}; ++ ++struct mpidr_hash { ++ u64 mask; ++ u32 
shift_aff[4]; ++ u32 bits; ++}; ++ ++enum acpi_irq_model_id { ++ ACPI_IRQ_MODEL_PIC = 0, ++ ACPI_IRQ_MODEL_IOAPIC = 1, ++ ACPI_IRQ_MODEL_IOSAPIC = 2, ++ ACPI_IRQ_MODEL_PLATFORM = 3, ++ ACPI_IRQ_MODEL_GIC = 4, ++ ACPI_IRQ_MODEL_COUNT = 5, ++}; ++ ++enum con_scroll { ++ SM_UP = 0, ++ SM_DOWN = 1, ++}; ++ ++struct vc_data; ++ ++struct console_font; ++ ++struct consw { ++ struct module *owner; ++ const char * (*con_startup)(); ++ void (*con_init)(struct vc_data *, int); ++ void (*con_deinit)(struct vc_data *); ++ void (*con_clear)(struct vc_data *, int, int, int, int); ++ void (*con_putc)(struct vc_data *, int, int, int); ++ void (*con_putcs)(struct vc_data *, const short unsigned int *, int, int, int); ++ void (*con_cursor)(struct vc_data *, int); ++ bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); ++ int (*con_switch)(struct vc_data *); ++ int (*con_blank)(struct vc_data *, int, int); ++ int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int); ++ int (*con_font_get)(struct vc_data *, struct console_font *); ++ int (*con_font_default)(struct vc_data *, struct console_font *, char *); ++ int (*con_font_copy)(struct vc_data *, int); ++ int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); ++ void (*con_set_palette)(struct vc_data *, const unsigned char *); ++ void (*con_scrolldelta)(struct vc_data *, int); ++ int (*con_set_origin)(struct vc_data *); ++ void (*con_save_screen)(struct vc_data *); ++ u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); ++ void (*con_invert_region)(struct vc_data *, u16 *, int); ++ u16 * (*con_screen_pos)(struct vc_data *, int); ++ long unsigned int (*con_getxy)(struct vc_data *, long unsigned int, int *, int *); ++ void (*con_flush_scrollback)(struct vc_data *); ++ int (*con_debug_enter)(struct vc_data *); ++ int (*con_debug_leave)(struct vc_data *); ++}; ++ ++struct console { ++ char name[16]; ++ void (*write)(struct console *, const char *, unsigned int); ++ int (*read)(struct console *, char *, unsigned int); ++ struct tty_driver * (*device)(struct console *, int *); ++ void (*unblank)(); ++ int (*setup)(struct console *, char *); ++ int (*match)(struct console *, char *, int, char *); ++ short int flags; ++ short int index; ++ int cflag; ++ void *data; ++ struct console *next; ++}; ++ ++enum wb_stat_item { ++ WB_RECLAIMABLE = 0, ++ WB_WRITEBACK = 1, ++ WB_DIRTIED = 2, ++ WB_WRITTEN = 3, ++ NR_WB_STAT_ITEMS = 4, ++}; ++ ++enum stat_group { ++ STAT_READ = 0, ++ STAT_WRITE = 1, ++ STAT_DISCARD = 2, ++ NR_STAT_GROUPS = 3, ++}; ++ ++enum cpu_idle_type { ++ CPU_IDLE = 0, ++ CPU_NOT_IDLE = 1, ++ CPU_NEWLY_IDLE = 2, ++ CPU_MAX_IDLE_TYPES = 3, ++}; ++ ++struct property { ++ char *name; ++ int length; ++ void *value; ++ struct property *next; ++ long unsigned int _flags; ++ struct bin_attribute attr; ++}; ++ ++enum memcg_stat_item { ++ MEMCG_CACHE = 28, ++ MEMCG_RSS = 29, ++ MEMCG_RSS_HUGE = 30, ++ MEMCG_SWAP = 31, ++ MEMCG_SOCK = 32, ++ MEMCG_KERNEL_STACK_KB = 33, ++ MEMCG_NR_STAT = 34, ++}; ++ ++enum memcg_memory_event { ++ MEMCG_LOW = 0, ++ MEMCG_HIGH = 1, ++ MEMCG_MAX = 2, ++ MEMCG_OOM = 3, ++ MEMCG_OOM_KILL = 4, ++ MEMCG_SWAP_MAX = 5, ++ MEMCG_SWAP_FAIL = 6, ++ MEMCG_NR_MEMORY_EVENTS = 7, ++}; ++ ++enum mem_cgroup_events_target { ++ MEM_CGROUP_TARGET_THRESH = 0, ++ MEM_CGROUP_TARGET_SOFTLIMIT = 1, ++ MEM_CGROUP_TARGET_NUMAINFO = 2, ++ MEM_CGROUP_NTARGETS = 3, ++}; ++ ++struct disk_stats { ++ u64 nsecs[3]; ++ long unsigned int sectors[3]; ++ long unsigned int 
ios[3]; ++ long unsigned int merges[3]; ++ long unsigned int io_ticks; ++ long unsigned int time_in_queue; ++}; ++ ++struct partition_meta_info { ++ char uuid[37]; ++ u8 volname[64]; ++}; ++ ++struct disk_part_tbl { ++ struct callback_head callback_head; ++ int len; ++ struct hd_struct *last_lookup; ++ struct hd_struct *part[0]; ++}; ++ ++struct blk_integrity_iter; ++ ++typedef blk_status_t integrity_processing_fn(struct blk_integrity_iter *); ++ ++struct blk_integrity_profile { ++ integrity_processing_fn *generate_fn; ++ integrity_processing_fn *verify_fn; ++ const char *name; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct hd_geometry; ++ ++struct pr_ops; ++ ++struct block_device_operations { ++ int (*open)(struct block_device *, fmode_t); ++ void (*release)(struct gendisk *, fmode_t); ++ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); ++ int (*ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); ++ int (*compat_ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); ++ unsigned int (*check_events)(struct gendisk *, unsigned int); ++ int (*media_changed)(struct gendisk *); ++ void (*unlock_native_capacity)(struct gendisk *); ++ int (*revalidate_disk)(struct gendisk *); ++ int (*getgeo)(struct block_device *, struct hd_geometry *); ++ void (*swap_slot_free_notify)(struct block_device *, long unsigned int); ++ struct module *owner; ++ const struct pr_ops *pr_ops; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct sg_io_v4 { ++ __s32 guard; ++ __u32 protocol; ++ __u32 subprotocol; ++ __u32 request_len; ++ __u64 request; ++ __u64 request_tag; ++ __u32 request_attr; ++ __u32 request_priority; ++ __u32 request_extra; ++ __u32 max_response_len; ++ __u64 response; ++ __u32 dout_iovec_count; ++ __u32 dout_xfer_len; ++ __u32 din_iovec_count; ++ __u32 din_xfer_len; ++ __u64 dout_xferp; ++ __u64 din_xferp; ++ __u32 timeout; ++ __u32 flags; ++ __u64 usr_ptr; ++ __u32 spare_in; ++ __u32 driver_status; ++ __u32 transport_status; ++ __u32 device_status; ++ __u32 retry_delay; ++ __u32 info; ++ __u32 duration; ++ __u32 response_len; ++ __s32 din_resid; ++ __s32 dout_resid; ++ __u64 generated_tag; ++ __u32 spare_out; ++ __u32 padding; ++}; ++ ++struct bsg_ops { ++ int (*check_proto)(struct sg_io_v4 *); ++ int (*fill_hdr)(struct request *, struct sg_io_v4 *, fmode_t); ++ int (*complete_rq)(struct request *, struct sg_io_v4 *); ++ void (*free_rq)(struct request *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++typedef __u32 req_flags_t; ++ ++typedef void rq_end_io_fn(struct request *, blk_status_t); ++ ++enum mq_rq_state { ++ MQ_RQ_IDLE = 0, ++ MQ_RQ_IN_FLIGHT = 1, ++ MQ_RQ_COMPLETE = 2, ++}; ++ ++struct request { ++ struct request_queue *q; ++ struct blk_mq_ctx *mq_ctx; ++ int cpu; ++ unsigned int cmd_flags; ++ req_flags_t rq_flags; ++ int internal_tag; ++ unsigned int __data_len; ++ int tag; ++ sector_t __sector; ++ struct bio *bio; ++ struct bio *biotail; ++ struct list_head queuelist; ++ union { ++ struct hlist_node hash; ++ struct list_head ipi_list; ++ }; ++ union { ++ struct rb_node rb_node; ++ struct bio_vec special_vec; ++ void *completion_data; ++ int error_count; ++ }; ++ union { ++ struct { ++ struct io_cq *icq; ++ void *priv[2]; ++ } elv; ++ struct { ++ unsigned int seq; ++ struct list_head list; ++ rq_end_io_fn *saved_end_io; ++ } flush; 
++ }; ++ struct gendisk *rq_disk; ++ struct hd_struct *part; ++ u64 start_time_ns; ++ u64 io_start_time_ns; ++ short unsigned int wbt_flags; ++ short unsigned int nr_phys_segments; ++ short unsigned int nr_integrity_segments; ++ short unsigned int write_hint; ++ short unsigned int ioprio; ++ void *special; ++ unsigned int extra_len; ++ enum mq_rq_state state; ++ refcount_t ref; ++ unsigned int timeout; ++ long unsigned int __deadline; ++ struct list_head timeout_list; ++ union { ++ struct __call_single_data csd; ++ u64 fifo_time; ++ }; ++ rq_end_io_fn *end_io; ++ void *end_io_data; ++ struct request *next_rq; ++ struct request_list *rl; ++}; ++ ++enum elv_merge { ++ ELEVATOR_NO_MERGE = 0, ++ ELEVATOR_FRONT_MERGE = 1, ++ ELEVATOR_BACK_MERGE = 2, ++ ELEVATOR_DISCARD_MERGE = 3, ++}; ++ ++typedef enum elv_merge elevator_merge_fn(struct request_queue *, struct request **, struct bio *); ++ ++typedef void elevator_merge_req_fn(struct request_queue *, struct request *, struct request *); ++ ++typedef void elevator_merged_fn(struct request_queue *, struct request *, enum elv_merge); ++ ++typedef int elevator_allow_bio_merge_fn(struct request_queue *, struct request *, struct bio *); ++ ++typedef int elevator_allow_rq_merge_fn(struct request_queue *, struct request *, struct request *); ++ ++typedef void elevator_bio_merged_fn(struct request_queue *, struct request *, struct bio *); ++ ++typedef int elevator_dispatch_fn(struct request_queue *, int); ++ ++typedef void elevator_add_req_fn(struct request_queue *, struct request *); ++ ++typedef struct request *elevator_request_list_fn(struct request_queue *, struct request *); ++ ++typedef void elevator_completed_req_fn(struct request_queue *, struct request *); ++ ++typedef int elevator_may_queue_fn(struct request_queue *, unsigned int); ++ ++typedef void elevator_init_icq_fn(struct io_cq *); ++ ++typedef void elevator_exit_icq_fn(struct io_cq *); ++ ++typedef int elevator_set_req_fn(struct request_queue *, struct request *, struct bio *, gfp_t); ++ ++typedef void elevator_put_req_fn(struct request *); ++ ++typedef void elevator_activate_req_fn(struct request_queue *, struct request *); ++ ++typedef void elevator_deactivate_req_fn(struct request_queue *, struct request *); ++ ++struct elevator_type; ++ ++typedef int elevator_init_fn(struct request_queue *, struct elevator_type *); ++ ++typedef void elevator_exit_fn(struct elevator_queue *); ++ ++typedef void elevator_registered_fn(struct request_queue *); ++ ++struct elevator_ops { ++ elevator_merge_fn *elevator_merge_fn; ++ elevator_merged_fn *elevator_merged_fn; ++ elevator_merge_req_fn *elevator_merge_req_fn; ++ elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; ++ elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; ++ elevator_bio_merged_fn *elevator_bio_merged_fn; ++ elevator_dispatch_fn *elevator_dispatch_fn; ++ elevator_add_req_fn *elevator_add_req_fn; ++ elevator_activate_req_fn *elevator_activate_req_fn; ++ elevator_deactivate_req_fn *elevator_deactivate_req_fn; ++ elevator_completed_req_fn *elevator_completed_req_fn; ++ elevator_request_list_fn *elevator_former_req_fn; ++ elevator_request_list_fn *elevator_latter_req_fn; ++ elevator_init_icq_fn *elevator_init_icq_fn; ++ elevator_exit_icq_fn *elevator_exit_icq_fn; ++ elevator_set_req_fn *elevator_set_req_fn; ++ elevator_put_req_fn *elevator_put_req_fn; ++ elevator_may_queue_fn *elevator_may_queue_fn; ++ elevator_init_fn *elevator_init_fn; ++ elevator_exit_fn *elevator_exit_fn; ++ elevator_registered_fn *elevator_registered_fn; 
++}; ++ ++struct blk_mq_alloc_data; ++ ++struct elevator_mq_ops { ++ int (*init_sched)(struct request_queue *, struct elevator_type *); ++ void (*exit_sched)(struct elevator_queue *); ++ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); ++ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); ++ void (*depth_updated)(struct blk_mq_hw_ctx *); ++ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); ++ bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); ++ int (*request_merge)(struct request_queue *, struct request **, struct bio *); ++ void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); ++ void (*requests_merged)(struct request_queue *, struct request *, struct request *); ++ void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); ++ void (*prepare_request)(struct request *, struct bio *); ++ void (*finish_request)(struct request *); ++ void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); ++ struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); ++ bool (*has_work)(struct blk_mq_hw_ctx *); ++ void (*completed_request)(struct request *); ++ void (*started_request)(struct request *); ++ void (*requeue_request)(struct request *); ++ struct request * (*former_request)(struct request_queue *, struct request *); ++ struct request * (*next_request)(struct request_queue *, struct request *); ++ void (*init_icq)(struct io_cq *); ++ void (*exit_icq)(struct io_cq *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct elv_fs_entry; ++ ++struct blk_mq_debugfs_attr; ++ ++struct elevator_type { ++ struct kmem_cache *icq_cache; ++ union { ++ struct elevator_ops sq; ++ struct elevator_mq_ops mq; ++ } ops; ++ size_t icq_size; ++ size_t icq_align; ++ struct elv_fs_entry *elevator_attrs; ++ char elevator_name[16]; ++ const char *elevator_alias; ++ struct module *elevator_owner; ++ bool uses_mq; ++ const struct blk_mq_debugfs_attr *queue_debugfs_attrs; ++ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; ++ char icq_cache_name[22]; ++ struct list_head list; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct elevator_queue { ++ struct elevator_type *type; ++ void *elevator_data; ++ struct kobject kobj; ++ struct mutex sysfs_lock; ++ unsigned int registered: 1; ++ unsigned int uses_mq: 1; ++ struct hlist_head hash[64]; ++}; ++ ++struct elv_fs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct elevator_queue *, char *); ++ ssize_t (*store)(struct elevator_queue *, const char *, size_t); ++}; ++ ++struct seq_file___2; ++ ++struct seq_operations___2; ++ ++struct blk_mq_debugfs_attr { ++ const char *name; ++ umode_t mode; ++ int (*show)(void *, struct seq_file___2 *); ++ ssize_t (*write)(void *, const char *, size_t, loff_t *); ++ const struct seq_operations___2 *seq_ops; ++}; ++ ++struct blk_queue_tag { ++ struct request **tag_index; ++ long unsigned int *tag_map; ++ int max_depth; ++ int real_max_depth; ++ atomic_t refcnt; ++ int alloc_policy; ++ int next_tag; ++}; ++ ++struct blk_mq_queue_data; ++ ++typedef blk_status_t queue_rq_fn(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); ++ ++typedef bool get_budget_fn(struct 
blk_mq_hw_ctx *); ++ ++typedef void put_budget_fn(struct blk_mq_hw_ctx *); ++ ++typedef enum blk_eh_timer_return timeout_fn(struct request *, bool); ++ ++typedef int poll_fn(struct blk_mq_hw_ctx *, unsigned int); ++ ++typedef int init_hctx_fn(struct blk_mq_hw_ctx *, void *, unsigned int); ++ ++typedef void exit_hctx_fn(struct blk_mq_hw_ctx *, unsigned int); ++ ++typedef int init_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); ++ ++typedef void exit_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int); ++ ++typedef void cleanup_rq_fn(struct request *); ++ ++typedef int map_queues_fn(struct blk_mq_tag_set *); ++ ++struct blk_mq_ops { ++ queue_rq_fn *queue_rq; ++ get_budget_fn *get_budget; ++ put_budget_fn *put_budget; ++ timeout_fn *timeout; ++ poll_fn *poll; ++ softirq_done_fn *complete; ++ init_hctx_fn *init_hctx; ++ exit_hctx_fn *exit_hctx; ++ init_request_fn *init_request; ++ exit_request_fn *exit_request; ++ void (*initialize_rq_fn)(struct request *); ++ cleanup_rq_fn *cleanup_rq; ++ map_queues_fn *map_queues; ++ void (*show_rq)(struct seq_file *, struct request *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct blk_integrity_iter { ++ void *prot_buf; ++ void *data_buf; ++ sector_t seed; ++ unsigned int data_size; ++ short unsigned int interval; ++ const char *disk_name; ++}; ++ ++enum pr_type { ++ PR_WRITE_EXCLUSIVE = 1, ++ PR_EXCLUSIVE_ACCESS = 2, ++ PR_WRITE_EXCLUSIVE_REG_ONLY = 3, ++ PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, ++ PR_WRITE_EXCLUSIVE_ALL_REGS = 5, ++ PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, ++}; ++ ++struct pr_ops { ++ int (*pr_register)(struct block_device *, u64, u64, u32); ++ int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); ++ int (*pr_release)(struct block_device *, u64, enum pr_type); ++ int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); ++ int (*pr_clear)(struct block_device *, u64); ++}; ++ ++struct trace_event_raw_initcall_level { ++ struct trace_entry ent; ++ u32 __data_loc_level; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_initcall_start { ++ struct trace_entry ent; ++ initcall_t func; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_initcall_finish { ++ struct trace_entry ent; ++ initcall_t func; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_initcall_level { ++ u32 level; ++}; ++ ++struct trace_event_data_offsets_initcall_start {}; ++ ++struct trace_event_data_offsets_initcall_finish {}; ++ ++struct blacklist_entry { ++ struct list_head next; ++ char *buf; ++}; ++ ++struct atomic_notifier_head___2; ++ ++struct rw_semaphore___2; ++ ++enum { ++ PROC_ROOT_INO = 1, ++ PROC_IPC_INIT_INO = 4026531839, ++ PROC_UTS_INIT_INO = 4026531838, ++ PROC_USER_INIT_INO = 4026531837, ++ PROC_PID_INIT_INO = 4026531836, ++ PROC_CGROUP_INIT_INO = 4026531835, ++}; ++ ++typedef u64 uint64_t; ++ ++struct fs_struct { ++ int users; ++ spinlock_t lock; ++ seqcount_t seq; ++ int umask; ++ int in_exec; ++ struct path root; ++ struct path pwd; ++}; ++ ++struct ida { ++ struct radix_tree_root ida_rt; ++}; ++ ++struct __kernel_sockaddr_storage { ++ __kernel_sa_family_t ss_family; ++ char __data[126]; ++}; ++ ++struct vfsmount { ++ struct dentry *mnt_root; ++ struct super_block *mnt_sb; ++ int mnt_flags; ++}; ++ ++struct 
posix_acl_entry { ++ short int e_tag; ++ short unsigned int e_perm; ++ union { ++ kuid_t e_uid; ++ kgid_t e_gid; ++ }; ++}; ++ ++struct posix_acl { ++ refcount_t a_refcount; ++ struct callback_head a_rcu; ++ unsigned int a_count; ++ struct posix_acl_entry a_entries[0]; ++}; ++ ++enum { ++ __PERCPU_REF_ATOMIC = 1, ++ __PERCPU_REF_DEAD = 2, ++ __PERCPU_REF_ATOMIC_DEAD = 3, ++ __PERCPU_REF_FLAG_BITS = 2, ++}; ++ ++enum pageflags { ++ PG_locked = 0, ++ PG_error = 1, ++ PG_referenced = 2, ++ PG_uptodate = 3, ++ PG_dirty = 4, ++ PG_lru = 5, ++ PG_active = 6, ++ PG_waiters = 7, ++ PG_slab = 8, ++ PG_owner_priv_1 = 9, ++ PG_arch_1 = 10, ++ PG_reserved = 11, ++ PG_private = 12, ++ PG_private_2 = 13, ++ PG_writeback = 14, ++ PG_head = 15, ++ PG_mappedtodisk = 16, ++ PG_reclaim = 17, ++ PG_swapbacked = 18, ++ PG_unevictable = 19, ++ PG_mlocked = 20, ++ PG_hwpoison = 21, ++ PG_young = 22, ++ PG_idle = 23, ++ PG_percpu_ref = 24, ++ __NR_PAGEFLAGS = 25, ++ PG_checked = 9, ++ PG_swapcache = 9, ++ PG_fscache = 13, ++ PG_pinned = 9, ++ PG_savepinned = 4, ++ PG_foreign = 9, ++ PG_slob_free = 12, ++ PG_double_map = 13, ++ PG_isolated = 17, ++}; ++ ++struct blkg_rwstat { ++ struct percpu_counter cpu_cnt[5]; ++ atomic64_t aux_cnt[5]; ++}; ++ ++struct blkcg; ++ ++struct blkg_policy_data; ++ ++struct blkcg_gq { ++ struct request_queue *q; ++ struct list_head q_node; ++ struct hlist_node blkcg_node; ++ struct blkcg *blkcg; ++ struct bdi_writeback_congested *wb_congested; ++ struct blkcg_gq *parent; ++ struct request_list rl; ++ atomic_t refcnt; ++ bool online; ++ struct blkg_rwstat stat_bytes; ++ struct blkg_rwstat stat_ios; ++ struct blkg_policy_data *pd[5]; ++ struct callback_head callback_head; ++ atomic_t use_delay; ++ atomic64_t delay_nsec; ++ atomic64_t delay_start; ++ u64 last_delay; ++ int last_use; ++}; ++ ++typedef int suspend_state_t; ++ ++enum suspend_stat_step { ++ SUSPEND_FREEZE = 1, ++ SUSPEND_PREPARE = 2, ++ SUSPEND_SUSPEND = 3, ++ SUSPEND_SUSPEND_LATE = 4, ++ SUSPEND_SUSPEND_NOIRQ = 5, ++ SUSPEND_RESUME_NOIRQ = 6, ++ SUSPEND_RESUME_EARLY = 7, ++ SUSPEND_RESUME = 8, ++}; ++ ++struct suspend_stats { ++ int success; ++ int fail; ++ int failed_freeze; ++ int failed_prepare; ++ int failed_suspend; ++ int failed_suspend_late; ++ int failed_suspend_noirq; ++ int failed_resume; ++ int failed_resume_early; ++ int failed_resume_noirq; ++ int last_failed_dev; ++ char failed_devs[80]; ++ int last_failed_errno; ++ int errno[2]; ++ int last_failed_step; ++ enum suspend_stat_step failed_steps[2]; ++}; ++ ++enum s2idle_states { ++ S2IDLE_STATE_NONE = 0, ++ S2IDLE_STATE_ENTER = 1, ++ S2IDLE_STATE_WAKE = 2, ++}; ++ ++struct pbe { ++ void *address; ++ void *orig_address; ++ struct pbe *next; ++}; ++ ++enum { ++ Root_NFS = 255, ++ Root_RAM0 = 1048576, ++ Root_RAM1 = 1048577, ++ Root_FD0 = 2097152, ++ Root_HDA1 = 3145729, ++ Root_HDA2 = 3145730, ++ Root_SDA1 = 8388609, ++ Root_SDA2 = 8388610, ++ Root_HDC1 = 23068673, ++ Root_SR0 = 11534336, ++}; ++ ++struct xdr_buf { ++ struct kvec head[1]; ++ struct kvec tail[1]; ++ struct page **pages; ++ unsigned int page_base; ++ unsigned int page_len; ++ unsigned int flags; ++ unsigned int buflen; ++ unsigned int len; ++}; ++ ++struct xdr_stream { ++ __be32 *p; ++ struct xdr_buf *buf; ++ __be32 *end; ++ struct kvec *iov; ++ struct kvec scratch; ++ struct page **page_ptr; ++ unsigned int nwords; ++}; ++ ++struct rpc_rqst; ++ ++typedef void (*kxdreproc_t)(struct rpc_rqst *, struct xdr_stream *, const void *); ++ ++struct rpc_xprt; ++ ++struct rpc_task; ++ ++struct rpc_cred; ++ 
++struct rpc_rqst { ++ struct rpc_xprt *rq_xprt; ++ struct xdr_buf rq_snd_buf; ++ struct xdr_buf rq_rcv_buf; ++ struct rpc_task *rq_task; ++ struct rpc_cred *rq_cred; ++ __be32 rq_xid; ++ int rq_cong; ++ u32 rq_seqno; ++ int rq_enc_pages_num; ++ struct page **rq_enc_pages; ++ void (*rq_release_snd_buf)(struct rpc_rqst *); ++ struct list_head rq_list; ++ void *rq_buffer; ++ size_t rq_callsize; ++ void *rq_rbuffer; ++ size_t rq_rcvsize; ++ size_t rq_xmit_bytes_sent; ++ size_t rq_reply_bytes_recvd; ++ struct xdr_buf rq_private_buf; ++ long unsigned int rq_majortimeo; ++ long unsigned int rq_timeout; ++ ktime_t rq_rtt; ++ unsigned int rq_retries; ++ unsigned int rq_connect_cookie; ++ u32 rq_bytes_sent; ++ ktime_t rq_xtime; ++ int rq_ntrans; ++ struct list_head rq_bc_list; ++ long unsigned int rq_bc_pa_state; ++ struct list_head rq_bc_pa_list; ++}; ++ ++typedef int (*kxdrdproc_t)(struct rpc_rqst *, struct xdr_stream *, void *); ++ ++struct rpc_procinfo; ++ ++struct rpc_message { ++ const struct rpc_procinfo *rpc_proc; ++ void *rpc_argp; ++ void *rpc_resp; ++ struct rpc_cred *rpc_cred; ++}; ++ ++struct rpc_procinfo { ++ u32 p_proc; ++ kxdreproc_t p_encode; ++ kxdrdproc_t p_decode; ++ unsigned int p_arglen; ++ unsigned int p_replen; ++ unsigned int p_timer; ++ u32 p_statidx; ++ const char *p_name; ++}; ++ ++struct rpc_auth; ++ ++struct rpc_credops; ++ ++struct rpc_cred { ++ struct hlist_node cr_hash; ++ struct list_head cr_lru; ++ struct callback_head cr_rcu; ++ struct rpc_auth *cr_auth; ++ const struct rpc_credops *cr_ops; ++ long unsigned int cr_expire; ++ long unsigned int cr_flags; ++ atomic_t cr_count; ++ kuid_t cr_uid; ++}; ++ ++struct rpc_wait { ++ struct list_head list; ++ struct list_head links; ++ struct list_head timer_list; ++ long unsigned int expires; ++}; ++ ++struct rpc_wait_queue; ++ ++struct rpc_call_ops; ++ ++struct rpc_clnt; ++ ++struct rpc_task { ++ atomic_t tk_count; ++ int tk_status; ++ struct list_head tk_task; ++ void (*tk_callback)(struct rpc_task *); ++ void (*tk_action)(struct rpc_task *); ++ long unsigned int tk_timeout; ++ long unsigned int tk_runstate; ++ struct rpc_wait_queue *tk_waitqueue; ++ union { ++ struct work_struct tk_work; ++ struct rpc_wait tk_wait; ++ } u; ++ struct rpc_message tk_msg; ++ void *tk_calldata; ++ const struct rpc_call_ops *tk_ops; ++ struct rpc_clnt *tk_client; ++ struct rpc_xprt *tk_xprt; ++ struct rpc_rqst *tk_rqstp; ++ struct workqueue_struct *tk_workqueue; ++ ktime_t tk_start; ++ pid_t tk_owner; ++ short unsigned int tk_flags; ++ short unsigned int tk_timeouts; ++ short unsigned int tk_pid; ++ unsigned char tk_priority: 2; ++ unsigned char tk_garb_retry: 2; ++ unsigned char tk_cred_retry: 2; ++ unsigned char tk_rebind_retry: 2; ++}; ++ ++struct rpc_timer { ++ struct timer_list timer; ++ struct list_head list; ++ long unsigned int expires; ++}; ++ ++struct rpc_wait_queue { ++ spinlock_t lock; ++ struct list_head tasks[4]; ++ unsigned char maxpriority; ++ unsigned char priority; ++ unsigned char nr; ++ short unsigned int qlen; ++ struct rpc_timer timer_list; ++ const char *name; ++}; ++ ++struct rpc_call_ops { ++ void (*rpc_call_prepare)(struct rpc_task *, void *); ++ void (*rpc_call_done)(struct rpc_task *, void *); ++ void (*rpc_count_stats)(struct rpc_task *, void *); ++ void (*rpc_release)(void *); ++}; ++ ++struct rpc_iostats; ++ ++struct rpc_pipe_dir_head { ++ struct list_head pdh_entries; ++ struct dentry *pdh_dentry; ++}; ++ ++struct rpc_rtt { ++ long unsigned int timeo; ++ long unsigned int srtt[5]; ++ long unsigned int sdrtt[5]; 
++ int ntimeouts[5]; ++}; ++ ++struct rpc_timeout { ++ long unsigned int to_initval; ++ long unsigned int to_maxval; ++ long unsigned int to_increment; ++ unsigned int to_retries; ++ unsigned char to_exponential; ++}; ++ ++struct rpc_xprt_switch; ++ ++struct rpc_xprt_iter_ops; ++ ++struct rpc_xprt_iter { ++ struct rpc_xprt_switch *xpi_xpswitch; ++ struct rpc_xprt *xpi_cursor; ++ const struct rpc_xprt_iter_ops *xpi_ops; ++}; ++ ++struct rpc_stat; ++ ++struct rpc_program; ++ ++struct rpc_clnt { ++ atomic_t cl_count; ++ unsigned int cl_clid; ++ struct list_head cl_clients; ++ struct list_head cl_tasks; ++ spinlock_t cl_lock; ++ struct rpc_xprt *cl_xprt; ++ const struct rpc_procinfo *cl_procinfo; ++ u32 cl_prog; ++ u32 cl_vers; ++ u32 cl_maxproc; ++ struct rpc_auth *cl_auth; ++ struct rpc_stat *cl_stats; ++ struct rpc_iostats *cl_metrics; ++ unsigned int cl_softrtry: 1; ++ unsigned int cl_discrtry: 1; ++ unsigned int cl_noretranstimeo: 1; ++ unsigned int cl_autobind: 1; ++ unsigned int cl_chatty: 1; ++ struct rpc_rtt *cl_rtt; ++ const struct rpc_timeout *cl_timeout; ++ atomic_t cl_swapper; ++ int cl_nodelen; ++ char cl_nodename[65]; ++ struct rpc_pipe_dir_head cl_pipedir_objects; ++ struct rpc_clnt *cl_parent; ++ struct rpc_rtt cl_rtt_default; ++ struct rpc_timeout cl_timeout_default; ++ const struct rpc_program *cl_program; ++ struct dentry *cl_debugfs; ++ struct rpc_xprt_iter cl_xpi; ++}; ++ ++struct svc_xprt; ++ ++struct svc_serv; ++ ++struct rpc_xprt_ops; ++ ++struct rpc_xprt { ++ struct kref kref; ++ const struct rpc_xprt_ops *ops; ++ const struct rpc_timeout *timeout; ++ struct __kernel_sockaddr_storage addr; ++ size_t addrlen; ++ int prot; ++ long unsigned int cong; ++ long unsigned int cwnd; ++ size_t max_payload; ++ unsigned int tsh_size; ++ struct rpc_wait_queue binding; ++ struct rpc_wait_queue sending; ++ struct rpc_wait_queue pending; ++ struct rpc_wait_queue backlog; ++ struct list_head free; ++ unsigned int max_reqs; ++ unsigned int min_reqs; ++ unsigned int num_reqs; ++ long unsigned int state; ++ unsigned char resvport: 1; ++ atomic_t swapper; ++ unsigned int bind_index; ++ struct list_head xprt_switch; ++ long unsigned int bind_timeout; ++ long unsigned int reestablish_timeout; ++ unsigned int connect_cookie; ++ struct work_struct task_cleanup; ++ struct timer_list timer; ++ long unsigned int last_used; ++ long unsigned int idle_timeout; ++ long unsigned int connect_timeout; ++ long unsigned int max_reconnect_timeout; ++ spinlock_t transport_lock; ++ spinlock_t reserve_lock; ++ spinlock_t recv_lock; ++ u32 xid; ++ struct rpc_task *snd_task; ++ struct svc_xprt *bc_xprt; ++ struct svc_serv *bc_serv; ++ int bc_alloc_count; ++ atomic_t bc_free_slots; ++ spinlock_t bc_pa_lock; ++ struct list_head bc_pa_list; ++ struct list_head recv; ++ struct { ++ long unsigned int bind_count; ++ long unsigned int connect_count; ++ long unsigned int connect_start; ++ long unsigned int connect_time; ++ long unsigned int sends; ++ long unsigned int recvs; ++ long unsigned int bad_xids; ++ long unsigned int max_slots; ++ long long unsigned int req_u; ++ long long unsigned int bklog_u; ++ long long unsigned int sending_u; ++ long long unsigned int pending_u; ++ } stat; ++ struct net *xprt_net; ++ const char *servername; ++ const char *address_strings[6]; ++ struct dentry *debugfs; ++ atomic_t inject_disconnect; ++ struct callback_head rcu; ++}; ++ ++typedef u32 rpc_authflavor_t; ++ ++struct auth_cred { ++ kuid_t uid; ++ kgid_t gid; ++ struct group_info *group_info; ++ const char *principal; ++ long 
unsigned int ac_flags; ++ unsigned char machine_cred: 1; ++}; ++ ++struct rpc_cred_cache; ++ ++struct rpc_authops; ++ ++struct rpc_auth { ++ unsigned int au_cslack; ++ unsigned int au_rslack; ++ unsigned int au_verfsize; ++ unsigned int au_flags; ++ const struct rpc_authops *au_ops; ++ rpc_authflavor_t au_flavor; ++ atomic_t au_count; ++ struct rpc_cred_cache *au_credcache; ++}; ++ ++struct rpc_credops { ++ const char *cr_name; ++ int (*cr_init)(struct rpc_auth *, struct rpc_cred *); ++ void (*crdestroy)(struct rpc_cred *); ++ int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); ++ struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); ++ __be32 * (*crmarshal)(struct rpc_task *, __be32 *); ++ int (*crrefresh)(struct rpc_task *); ++ __be32 * (*crvalidate)(struct rpc_task *, __be32 *); ++ int (*crwrap_req)(struct rpc_task *, kxdreproc_t, void *, __be32 *, void *); ++ int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, void *, __be32 *, void *); ++ int (*crkey_timeout)(struct rpc_cred *); ++ bool (*crkey_to_expire)(struct rpc_cred *); ++ char * (*crstringify_acceptor)(struct rpc_cred *); ++}; ++ ++struct rpc_auth_create_args; ++ ++struct rpcsec_gss_info; ++ ++struct rpc_authops { ++ struct module *owner; ++ rpc_authflavor_t au_flavor; ++ char *au_name; ++ struct rpc_auth * (*create)(const struct rpc_auth_create_args *, struct rpc_clnt *); ++ void (*destroy)(struct rpc_auth *); ++ int (*hash_cred)(struct auth_cred *, unsigned int); ++ struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); ++ struct rpc_cred * (*crcreate)(struct rpc_auth *, struct auth_cred *, int, gfp_t); ++ int (*list_pseudoflavors)(rpc_authflavor_t *, int); ++ rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); ++ int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *); ++ int (*key_timeout)(struct rpc_auth *, struct rpc_cred *); ++}; ++ ++struct rpc_auth_create_args { ++ rpc_authflavor_t pseudoflavor; ++ const char *target_name; ++}; ++ ++struct rpcsec_gss_oid { ++ unsigned int len; ++ u8 data[32]; ++}; ++ ++struct rpcsec_gss_info { ++ struct rpcsec_gss_oid oid; ++ u32 qop; ++ u32 service; ++}; ++ ++struct rpc_xprt_ops { ++ void (*set_buffer_size)(struct rpc_xprt *, size_t, size_t); ++ int (*reserve_xprt)(struct rpc_xprt *, struct rpc_task *); ++ void (*release_xprt)(struct rpc_xprt *, struct rpc_task *); ++ void (*alloc_slot)(struct rpc_xprt *, struct rpc_task *); ++ void (*free_slot)(struct rpc_xprt *, struct rpc_rqst *); ++ void (*rpcbind)(struct rpc_task *); ++ void (*set_port)(struct rpc_xprt *, short unsigned int); ++ void (*connect)(struct rpc_xprt *, struct rpc_task *); ++ int (*buf_alloc)(struct rpc_task *); ++ void (*buf_free)(struct rpc_task *); ++ int (*send_request)(struct rpc_task *); ++ void (*set_retrans_timeout)(struct rpc_task *); ++ void (*timer)(struct rpc_xprt *, struct rpc_task *); ++ void (*release_request)(struct rpc_task *); ++ void (*close)(struct rpc_xprt *); ++ void (*destroy)(struct rpc_xprt *); ++ void (*set_connect_timeout)(struct rpc_xprt *, long unsigned int, long unsigned int); ++ void (*print_stats)(struct rpc_xprt *, struct seq_file *); ++ int (*enable_swap)(struct rpc_xprt *); ++ void (*disable_swap)(struct rpc_xprt *); ++ void (*inject_disconnect)(struct rpc_xprt *); ++ int (*bc_setup)(struct rpc_xprt *, unsigned int); ++ int (*bc_up)(struct svc_serv *, struct net *); ++ size_t (*bc_maxpayload)(struct rpc_xprt *); ++ void (*bc_free_rqst)(struct rpc_rqst *); ++ void (*bc_destroy)(struct rpc_xprt *, unsigned int); ++}; ++ 
++struct rpc_xprt_switch { ++ spinlock_t xps_lock; ++ struct kref xps_kref; ++ unsigned int xps_nxprts; ++ struct list_head xps_xprt_list; ++ struct net *xps_net; ++ const struct rpc_xprt_iter_ops *xps_iter_ops; ++ struct callback_head xps_rcu; ++}; ++ ++struct rpc_stat { ++ const struct rpc_program *program; ++ unsigned int netcnt; ++ unsigned int netudpcnt; ++ unsigned int nettcpcnt; ++ unsigned int nettcpconn; ++ unsigned int netreconn; ++ unsigned int rpccnt; ++ unsigned int rpcretrans; ++ unsigned int rpcauthrefresh; ++ unsigned int rpcgarbage; ++}; ++ ++struct rpc_version; ++ ++struct rpc_program { ++ const char *name; ++ u32 number; ++ unsigned int nrvers; ++ const struct rpc_version **version; ++ struct rpc_stat *stats; ++ const char *pipe_dir_name; ++}; ++ ++struct rpc_xprt_iter_ops { ++ void (*xpi_rewind)(struct rpc_xprt_iter *); ++ struct rpc_xprt * (*xpi_xprt)(struct rpc_xprt_iter *); ++ struct rpc_xprt * (*xpi_next)(struct rpc_xprt_iter *); ++}; ++ ++struct rpc_version { ++ u32 number; ++ unsigned int nrprocs; ++ const struct rpc_procinfo *procs; ++ unsigned int *counts; ++}; ++ ++struct nfs_fh { ++ short unsigned int size; ++ unsigned char data[128]; ++}; ++ ++enum nfs3_stable_how { ++ NFS_UNSTABLE = 0, ++ NFS_DATA_SYNC = 1, ++ NFS_FILE_SYNC = 2, ++ NFS_INVALID_STABLE_HOW = 4294967295, ++}; ++ ++struct nfs4_label { ++ uint32_t lfs; ++ uint32_t pi; ++ u32 len; ++ char *label; ++}; ++ ++typedef struct { ++ char data[8]; ++} nfs4_verifier; ++ ++struct nfs4_stateid_struct { ++ union { ++ char data[16]; ++ struct { ++ __be32 seqid; ++ char other[12]; ++ }; ++ }; ++ enum { ++ NFS4_INVALID_STATEID_TYPE = 0, ++ NFS4_SPECIAL_STATEID_TYPE = 1, ++ NFS4_OPEN_STATEID_TYPE = 2, ++ NFS4_LOCK_STATEID_TYPE = 3, ++ NFS4_DELEGATION_STATEID_TYPE = 4, ++ NFS4_LAYOUT_STATEID_TYPE = 5, ++ NFS4_PNFS_DS_STATEID_TYPE = 6, ++ NFS4_REVOKED_STATEID_TYPE = 7, ++ } type; ++}; ++ ++typedef struct nfs4_stateid_struct nfs4_stateid; ++ ++enum nfs_opnum4 { ++ OP_ACCESS = 3, ++ OP_CLOSE = 4, ++ OP_COMMIT = 5, ++ OP_CREATE = 6, ++ OP_DELEGPURGE = 7, ++ OP_DELEGRETURN = 8, ++ OP_GETATTR = 9, ++ OP_GETFH = 10, ++ OP_LINK = 11, ++ OP_LOCK = 12, ++ OP_LOCKT = 13, ++ OP_LOCKU = 14, ++ OP_LOOKUP = 15, ++ OP_LOOKUPP = 16, ++ OP_NVERIFY = 17, ++ OP_OPEN = 18, ++ OP_OPENATTR = 19, ++ OP_OPEN_CONFIRM = 20, ++ OP_OPEN_DOWNGRADE = 21, ++ OP_PUTFH = 22, ++ OP_PUTPUBFH = 23, ++ OP_PUTROOTFH = 24, ++ OP_READ = 25, ++ OP_READDIR = 26, ++ OP_READLINK = 27, ++ OP_REMOVE = 28, ++ OP_RENAME = 29, ++ OP_RENEW = 30, ++ OP_RESTOREFH = 31, ++ OP_SAVEFH = 32, ++ OP_SECINFO = 33, ++ OP_SETATTR = 34, ++ OP_SETCLIENTID = 35, ++ OP_SETCLIENTID_CONFIRM = 36, ++ OP_VERIFY = 37, ++ OP_WRITE = 38, ++ OP_RELEASE_LOCKOWNER = 39, ++ OP_BACKCHANNEL_CTL = 40, ++ OP_BIND_CONN_TO_SESSION = 41, ++ OP_EXCHANGE_ID = 42, ++ OP_CREATE_SESSION = 43, ++ OP_DESTROY_SESSION = 44, ++ OP_FREE_STATEID = 45, ++ OP_GET_DIR_DELEGATION = 46, ++ OP_GETDEVICEINFO = 47, ++ OP_GETDEVICELIST = 48, ++ OP_LAYOUTCOMMIT = 49, ++ OP_LAYOUTGET = 50, ++ OP_LAYOUTRETURN = 51, ++ OP_SECINFO_NO_NAME = 52, ++ OP_SEQUENCE = 53, ++ OP_SET_SSV = 54, ++ OP_TEST_STATEID = 55, ++ OP_WANT_DELEGATION = 56, ++ OP_DESTROY_CLIENTID = 57, ++ OP_RECLAIM_COMPLETE = 58, ++ OP_ALLOCATE = 59, ++ OP_COPY = 60, ++ OP_COPY_NOTIFY = 61, ++ OP_DEALLOCATE = 62, ++ OP_IO_ADVISE = 63, ++ OP_LAYOUTERROR = 64, ++ OP_LAYOUTSTATS = 65, ++ OP_OFFLOAD_CANCEL = 66, ++ OP_OFFLOAD_STATUS = 67, ++ OP_READ_PLUS = 68, ++ OP_SEEK = 69, ++ OP_WRITE_SAME = 70, ++ OP_CLONE = 71, ++ OP_ILLEGAL = 10044, ++}; ++ ++struct 
nfs4_string { ++ unsigned int len; ++ char *data; ++}; ++ ++struct nfs_fsid { ++ uint64_t major; ++ uint64_t minor; ++}; ++ ++struct nfs4_threshold { ++ __u32 bm; ++ __u32 l_type; ++ __u64 rd_sz; ++ __u64 wr_sz; ++ __u64 rd_io_sz; ++ __u64 wr_io_sz; ++}; ++ ++struct nfs_fattr { ++ unsigned int valid; ++ umode_t mode; ++ __u32 nlink; ++ kuid_t uid; ++ kgid_t gid; ++ dev_t rdev; ++ __u64 size; ++ union { ++ struct { ++ __u32 blocksize; ++ __u32 blocks; ++ } nfs2; ++ struct { ++ __u64 used; ++ } nfs3; ++ } du; ++ struct nfs_fsid fsid; ++ __u64 fileid; ++ __u64 mounted_on_fileid; ++ struct timespec atime; ++ struct timespec mtime; ++ struct timespec ctime; ++ __u64 change_attr; ++ __u64 pre_change_attr; ++ __u64 pre_size; ++ struct timespec pre_mtime; ++ struct timespec pre_ctime; ++ long unsigned int time_start; ++ long unsigned int gencount; ++ struct nfs4_string *owner_name; ++ struct nfs4_string *group_name; ++ struct nfs4_threshold *mdsthreshold; ++}; ++ ++struct nfs_fsinfo { ++ struct nfs_fattr *fattr; ++ __u32 rtmax; ++ __u32 rtpref; ++ __u32 rtmult; ++ __u32 wtmax; ++ __u32 wtpref; ++ __u32 wtmult; ++ __u32 dtpref; ++ __u64 maxfilesize; ++ struct timespec time_delta; ++ __u32 lease_time; ++ __u32 nlayouttypes; ++ __u32 layouttype[8]; ++ __u32 blksize; ++ __u32 clone_blksize; ++}; ++ ++struct nfs_fsstat { ++ struct nfs_fattr *fattr; ++ __u64 tbytes; ++ __u64 fbytes; ++ __u64 abytes; ++ __u64 tfiles; ++ __u64 ffiles; ++ __u64 afiles; ++}; ++ ++struct nfs_pathconf { ++ struct nfs_fattr *fattr; ++ __u32 max_link; ++ __u32 max_namelen; ++}; ++ ++struct nfs4_change_info { ++ u32 atomic; ++ u64 before; ++ u64 after; ++}; ++ ++struct nfs4_slot; ++ ++struct nfs4_sequence_args { ++ struct nfs4_slot *sa_slot; ++ u8 sa_cache_this: 1; ++ u8 sa_privileged: 1; ++}; ++ ++struct nfs4_sequence_res { ++ struct nfs4_slot *sr_slot; ++ long unsigned int sr_timestamp; ++ int sr_status; ++ u32 sr_status_flags; ++ u32 sr_highest_slotid; ++ u32 sr_target_highest_slotid; ++}; ++ ++struct nfs_open_context; ++ ++struct nfs_lock_context { ++ refcount_t count; ++ struct list_head list; ++ struct nfs_open_context *open_context; ++ fl_owner_t lockowner; ++ atomic_t io_count; ++}; ++ ++struct nfs4_state; ++ ++struct nfs_open_context { ++ struct nfs_lock_context lock_context; ++ fl_owner_t flock_owner; ++ struct dentry *dentry; ++ struct rpc_cred *cred; ++ struct nfs4_state *state; ++ fmode_t mode; ++ long unsigned int flags; ++ int error; ++ struct list_head list; ++ struct nfs4_threshold *mdsthreshold; ++}; ++ ++struct nlm_host; ++ ++struct nfs_iostats; ++ ++struct nfs_auth_info { ++ unsigned int flavor_len; ++ rpc_authflavor_t flavors[12]; ++}; ++ ++struct nfs_fscache_key; ++ ++struct fscache_cookie; ++ ++struct pnfs_layoutdriver_type; ++ ++struct nfs_client; ++ ++struct nfs_server { ++ struct nfs_client *nfs_client; ++ struct list_head client_link; ++ struct list_head master_link; ++ struct rpc_clnt *client; ++ struct rpc_clnt *client_acl; ++ struct nlm_host *nlm_host; ++ struct nfs_iostats *io_stats; ++ atomic_long_t writeback; ++ int flags; ++ unsigned int caps; ++ unsigned int rsize; ++ unsigned int rpages; ++ unsigned int wsize; ++ unsigned int wpages; ++ unsigned int wtmult; ++ unsigned int dtsize; ++ short unsigned int port; ++ unsigned int bsize; ++ unsigned int acregmin; ++ unsigned int acregmax; ++ unsigned int acdirmin; ++ unsigned int acdirmax; ++ unsigned int namelen; ++ unsigned int options; ++ unsigned int clone_blksize; ++ struct nfs_fsid fsid; ++ __u64 maxfilesize; ++ struct timespec time_delta; ++ 
long unsigned int mount_time; ++ struct super_block *super; ++ dev_t s_dev; ++ struct nfs_auth_info auth_info; ++ struct nfs_fscache_key *fscache_key; ++ struct fscache_cookie *fscache; ++ u32 pnfs_blksize; ++ u32 attr_bitmask[3]; ++ u32 attr_bitmask_nl[3]; ++ u32 exclcreat_bitmask[3]; ++ u32 cache_consistency_bitmask[3]; ++ u32 acl_bitmask; ++ u32 fh_expire_type; ++ struct pnfs_layoutdriver_type *pnfs_curr_ld; ++ struct rpc_wait_queue roc_rpcwaitq; ++ void *pnfs_ld_data; ++ struct rb_root state_owners; ++ struct ida openowner_id; ++ struct ida lockowner_id; ++ struct list_head state_owners_lru; ++ struct list_head layouts; ++ struct list_head delegations; ++ struct list_head ss_copies; ++ long unsigned int mig_gen; ++ long unsigned int mig_status; ++ void (*destroy)(struct nfs_server *); ++ atomic_t active; ++ int: 32; ++ struct __kernel_sockaddr_storage mountd_address; ++ size_t mountd_addrlen; ++ u32 mountd_version; ++ short unsigned int mountd_port; ++ short unsigned int mountd_protocol; ++ struct rpc_wait_queue uoc_rpcwaitq; ++}; ++ ++struct nfs_subversion; ++ ++struct idmap; ++ ++struct nfs4_minor_version_ops; ++ ++struct nfs4_slot_table; ++ ++struct nfs4_session; ++ ++struct nfs_rpc_ops; ++ ++struct nfs41_server_owner; ++ ++struct nfs41_server_scope; ++ ++struct nfs41_impl_id; ++ ++struct nfs_client { ++ refcount_t cl_count; ++ atomic_t cl_mds_count; ++ int cl_cons_state; ++ long unsigned int cl_res_state; ++ long unsigned int cl_flags; ++ struct __kernel_sockaddr_storage cl_addr; ++ size_t cl_addrlen; ++ char *cl_hostname; ++ char *cl_acceptor; ++ struct list_head cl_share_link; ++ struct list_head cl_superblocks; ++ struct rpc_clnt *cl_rpcclient; ++ const struct nfs_rpc_ops *rpc_ops; ++ int cl_proto; ++ struct nfs_subversion *cl_nfs_mod; ++ u32 cl_minorversion; ++ struct rpc_cred *cl_machine_cred; ++ struct list_head cl_ds_clients; ++ u64 cl_clientid; ++ nfs4_verifier cl_confirm; ++ long unsigned int cl_state; ++ spinlock_t cl_lock; ++ long unsigned int cl_lease_time; ++ long unsigned int cl_last_renewal; ++ struct delayed_work cl_renewd; ++ struct rpc_wait_queue cl_rpcwaitq; ++ struct idmap *cl_idmap; ++ const char *cl_owner_id; ++ u32 cl_cb_ident; ++ const struct nfs4_minor_version_ops *cl_mvops; ++ long unsigned int cl_mig_gen; ++ struct nfs4_slot_table *cl_slot_tbl; ++ u32 cl_seqid; ++ u32 cl_exchange_flags; ++ struct nfs4_session *cl_session; ++ bool cl_preserve_clid; ++ struct nfs41_server_owner *cl_serverowner; ++ struct nfs41_server_scope *cl_serverscope; ++ struct nfs41_impl_id *cl_implid; ++ long unsigned int cl_sp4_flags; ++ wait_queue_head_t cl_lock_waitq; ++ char cl_ipaddr[48]; ++ struct fscache_cookie *fscache; ++ struct net *cl_net; ++ struct list_head pending_cb_stateids; ++}; ++ ++struct nfs_write_verifier { ++ char data[8]; ++}; ++ ++struct nfs_writeverf { ++ struct nfs_write_verifier verifier; ++ enum nfs3_stable_how committed; ++}; ++ ++struct nfs_pgio_args { ++ struct nfs4_sequence_args seq_args; ++ struct nfs_fh *fh; ++ struct nfs_open_context *context; ++ struct nfs_lock_context *lock_context; ++ nfs4_stateid stateid; ++ __u64 offset; ++ __u32 count; ++ unsigned int pgbase; ++ struct page **pages; ++ const u32 *bitmask; ++ enum nfs3_stable_how stable; ++}; ++ ++struct nfs_pgio_res { ++ struct nfs4_sequence_res seq_res; ++ struct nfs_fattr *fattr; ++ __u32 count; ++ __u32 op_status; ++ int eof; ++ struct nfs_writeverf *verf; ++ const struct nfs_server *server; ++}; ++ ++struct nfs_commitargs { ++ struct nfs4_sequence_args seq_args; ++ struct nfs_fh *fh; ++ 
__u64 offset; ++ __u32 count; ++ const u32 *bitmask; ++}; ++ ++struct nfs_commitres { ++ struct nfs4_sequence_res seq_res; ++ __u32 op_status; ++ struct nfs_fattr *fattr; ++ struct nfs_writeverf *verf; ++ const struct nfs_server *server; ++}; ++ ++struct nfs_removeargs { ++ struct nfs4_sequence_args seq_args; ++ const struct nfs_fh *fh; ++ struct qstr name; ++}; ++ ++struct nfs_removeres { ++ struct nfs4_sequence_res seq_res; ++ struct nfs_server *server; ++ struct nfs_fattr *dir_attr; ++ struct nfs4_change_info cinfo; ++}; ++ ++struct nfs_renameargs { ++ struct nfs4_sequence_args seq_args; ++ const struct nfs_fh *old_dir; ++ const struct nfs_fh *new_dir; ++ const struct qstr *old_name; ++ const struct qstr *new_name; ++}; ++ ++struct nfs_renameres { ++ struct nfs4_sequence_res seq_res; ++ struct nfs_server *server; ++ struct nfs4_change_info old_cinfo; ++ struct nfs_fattr *old_fattr; ++ struct nfs4_change_info new_cinfo; ++ struct nfs_fattr *new_fattr; ++}; ++ ++struct nfs_entry { ++ __u64 ino; ++ __u64 cookie; ++ __u64 prev_cookie; ++ const char *name; ++ unsigned int len; ++ int eof; ++ struct nfs_fh *fh; ++ struct nfs_fattr *fattr; ++ struct nfs4_label *label; ++ unsigned char d_type; ++ struct nfs_server *server; ++}; ++ ++struct nfstime4 { ++ u64 seconds; ++ u32 nseconds; ++}; ++ ++struct pnfs_layout_segment; ++ ++struct pnfs_commit_bucket { ++ struct list_head written; ++ struct list_head committing; ++ struct pnfs_layout_segment *wlseg; ++ struct pnfs_layout_segment *clseg; ++ struct nfs_writeverf direct_verf; ++}; ++ ++struct pnfs_ds_commit_info { ++ int nwritten; ++ int ncommitting; ++ int nbuckets; ++ struct pnfs_commit_bucket *buckets; ++}; ++ ++struct nfs41_server_owner { ++ uint64_t minor_id; ++ uint32_t major_id_sz; ++ char major_id[1024]; ++}; ++ ++struct nfs41_server_scope { ++ uint32_t server_scope_sz; ++ char server_scope[1024]; ++}; ++ ++struct nfs41_impl_id { ++ char domain[1025]; ++ char name[1025]; ++ struct nfstime4 date; ++}; ++ ++struct nfs_page_array { ++ struct page **pagevec; ++ unsigned int npages; ++ struct page *page_array[8]; ++}; ++ ++struct nfs_page; ++ ++struct nfs_rw_ops; ++ ++struct nfs_io_completion; ++ ++struct nfs_direct_req; ++ ++struct nfs_pgio_completion_ops; ++ ++struct nfs_pgio_header { ++ struct inode *inode; ++ struct rpc_cred *cred; ++ struct list_head pages; ++ struct nfs_page *req; ++ struct nfs_writeverf verf; ++ fmode_t rw_mode; ++ struct pnfs_layout_segment *lseg; ++ loff_t io_start; ++ const struct rpc_call_ops *mds_ops; ++ void (*release)(struct nfs_pgio_header *); ++ const struct nfs_pgio_completion_ops *completion_ops; ++ const struct nfs_rw_ops *rw_ops; ++ struct nfs_io_completion *io_completion; ++ struct nfs_direct_req *dreq; ++ spinlock_t lock; ++ int pnfs_error; ++ int error; ++ long unsigned int good_bytes; ++ long unsigned int flags; ++ struct rpc_task task; ++ struct nfs_fattr fattr; ++ struct nfs_pgio_args args; ++ struct nfs_pgio_res res; ++ long unsigned int timestamp; ++ int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *); ++ __u64 mds_offset; ++ struct nfs_page_array page_array; ++ struct nfs_client *ds_clp; ++ int ds_commit_idx; ++ int pgio_mirror_idx; ++}; ++ ++struct nfs_pgio_completion_ops { ++ void (*error_cleanup)(struct list_head *, int); ++ void (*init_hdr)(struct nfs_pgio_header *); ++ void (*completion)(struct nfs_pgio_header *); ++ void (*reschedule_io)(struct nfs_pgio_header *); ++}; ++ ++struct nfs_mds_commit_info { ++ atomic_t rpcs_out; ++ atomic_long_t ncommit; ++ struct list_head list; ++}; 
++ ++struct nfs_commit_data; ++ ++struct nfs_commit_info; ++ ++struct nfs_commit_completion_ops { ++ void (*completion)(struct nfs_commit_data *); ++ void (*resched_write)(struct nfs_commit_info *, struct nfs_page *); ++}; ++ ++struct nfs_commit_data { ++ struct rpc_task task; ++ struct inode *inode; ++ struct rpc_cred *cred; ++ struct nfs_fattr fattr; ++ struct nfs_writeverf verf; ++ struct list_head pages; ++ struct list_head list; ++ struct nfs_direct_req *dreq; ++ struct nfs_commitargs args; ++ struct nfs_commitres res; ++ struct nfs_open_context *context; ++ struct pnfs_layout_segment *lseg; ++ struct nfs_client *ds_clp; ++ int ds_commit_index; ++ loff_t lwb; ++ const struct rpc_call_ops *mds_ops; ++ const struct nfs_commit_completion_ops *completion_ops; ++ int (*commit_done_cb)(struct rpc_task *, struct nfs_commit_data *); ++ long unsigned int flags; ++}; ++ ++struct nfs_commit_info { ++ struct inode *inode; ++ struct nfs_mds_commit_info *mds; ++ struct pnfs_ds_commit_info *ds; ++ struct nfs_direct_req *dreq; ++ const struct nfs_commit_completion_ops *completion_ops; ++}; ++ ++struct nfs_unlinkdata { ++ struct nfs_removeargs args; ++ struct nfs_removeres res; ++ struct dentry *dentry; ++ wait_queue_head_t wq; ++ struct rpc_cred *cred; ++ struct nfs_fattr dir_attr; ++ long int timeout; ++}; ++ ++struct nfs_renamedata { ++ struct nfs_renameargs args; ++ struct nfs_renameres res; ++ struct rpc_cred *cred; ++ struct inode *old_dir; ++ struct dentry *old_dentry; ++ struct nfs_fattr old_fattr; ++ struct inode *new_dir; ++ struct dentry *new_dentry; ++ struct nfs_fattr new_fattr; ++ void (*complete)(struct rpc_task *, struct nfs_renamedata *); ++ long int timeout; ++ bool cancelled; ++}; ++ ++struct nlmclnt_operations; ++ ++struct nfs_mount_info; ++ ++struct nfs_client_initdata; ++ ++struct nfs_access_entry; ++ ++struct nfs_rpc_ops { ++ u32 version; ++ const struct dentry_operations *dentry_ops; ++ const struct inode_operations *dir_inode_ops; ++ const struct inode_operations *file_inode_ops; ++ const struct file_operations *file_ops; ++ const struct nlmclnt_operations *nlmclnt_ops; ++ int (*getroot)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); ++ struct vfsmount * (*submount)(struct nfs_server *, struct dentry *, struct nfs_fh *, struct nfs_fattr *); ++ struct dentry * (*try_mount)(int, const char *, struct nfs_mount_info *, struct nfs_subversion *); ++ int (*getattr)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *, struct inode *); ++ int (*setattr)(struct dentry *, struct nfs_fattr *, struct iattr *); ++ int (*lookup)(struct inode *, const struct qstr *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); ++ int (*lookupp)(struct inode *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); ++ int (*access)(struct inode *, struct nfs_access_entry *); ++ int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); ++ int (*create)(struct inode *, struct dentry *, struct iattr *, int); ++ int (*remove)(struct inode *, struct dentry *); ++ void (*unlink_setup)(struct rpc_message *, struct dentry *, struct inode *); ++ void (*unlink_rpc_prepare)(struct rpc_task *, struct nfs_unlinkdata *); ++ int (*unlink_done)(struct rpc_task *, struct inode *); ++ void (*rename_setup)(struct rpc_message *, struct dentry *, struct dentry *); ++ void (*rename_rpc_prepare)(struct rpc_task *, struct nfs_renamedata *); ++ int (*rename_done)(struct rpc_task *, struct inode *, struct inode *); ++ int (*link)(struct inode *, struct inode *, 
const struct qstr *); ++ int (*symlink)(struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *); ++ int (*mkdir)(struct inode *, struct dentry *, struct iattr *); ++ int (*rmdir)(struct inode *, const struct qstr *); ++ int (*readdir)(struct dentry *, struct rpc_cred *, u64, struct page **, unsigned int, bool); ++ int (*mknod)(struct inode *, struct dentry *, struct iattr *, dev_t); ++ int (*statfs)(struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *); ++ int (*fsinfo)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); ++ int (*pathconf)(struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *); ++ int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); ++ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool); ++ int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *); ++ void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); ++ int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); ++ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, struct rpc_clnt **); ++ int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); ++ void (*commit_setup)(struct nfs_commit_data *, struct rpc_message *, struct rpc_clnt **); ++ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); ++ int (*commit_done)(struct rpc_task *, struct nfs_commit_data *); ++ int (*lock)(struct file *, int, struct file_lock *); ++ int (*lock_check_bounds)(const struct file_lock *); ++ void (*clear_acl_cache)(struct inode *); ++ void (*close_context)(struct nfs_open_context *, int); ++ struct inode * (*open_context)(struct inode *, struct nfs_open_context *, int, struct iattr *, int *); ++ int (*have_delegation)(struct inode *, fmode_t); ++ struct nfs_client * (*alloc_client)(const struct nfs_client_initdata *); ++ struct nfs_client * (*init_client)(struct nfs_client *, const struct nfs_client_initdata *); ++ void (*free_client)(struct nfs_client *); ++ struct nfs_server * (*create_server)(struct nfs_mount_info *, struct nfs_subversion *); ++ struct nfs_server * (*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); ++}; ++ ++struct nfs_access_entry { ++ struct rb_node rb_node; ++ struct list_head lru; ++ struct rpc_cred *cred; ++ __u32 mask; ++ struct callback_head callback_head; ++}; ++ ++enum blkg_rwstat_type { ++ BLKG_RWSTAT_READ = 0, ++ BLKG_RWSTAT_WRITE = 1, ++ BLKG_RWSTAT_SYNC = 2, ++ BLKG_RWSTAT_ASYNC = 3, ++ BLKG_RWSTAT_DISCARD = 4, ++ BLKG_RWSTAT_NR = 5, ++ BLKG_RWSTAT_TOTAL = 5, ++}; ++ ++struct blkcg_policy_data; ++ ++struct blkcg { ++ struct cgroup_subsys_state css; ++ spinlock_t lock; ++ struct radix_tree_root blkg_tree; ++ struct blkcg_gq *blkg_hint; ++ struct hlist_head blkg_list; ++ struct blkcg_policy_data *cpd[5]; ++ struct list_head all_blkcgs_node; ++ struct list_head cgwb_list; ++ refcount_t cgwb_refcnt; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct blkcg_policy_data { ++ struct blkcg *blkcg; ++ int plid; ++}; ++ ++struct blkg_policy_data { ++ struct blkcg_gq *blkg; ++ int plid; ++}; ++ ++enum perf_sw_ids { ++ PERF_COUNT_SW_CPU_CLOCK = 0, ++ PERF_COUNT_SW_TASK_CLOCK = 1, ++ PERF_COUNT_SW_PAGE_FAULTS = 2, ++ PERF_COUNT_SW_CONTEXT_SWITCHES = 3, ++ PERF_COUNT_SW_CPU_MIGRATIONS = 4, ++ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, ++ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, ++ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, ++ 
PERF_COUNT_SW_EMULATION_FAULTS = 8, ++ PERF_COUNT_SW_DUMMY = 9, ++ PERF_COUNT_SW_BPF_OUTPUT = 10, ++ PERF_COUNT_SW_MAX = 11, ++}; ++ ++enum perf_branch_sample_type_shift { ++ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, ++ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, ++ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, ++ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, ++ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, ++ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, ++ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, ++ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, ++ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, ++ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, ++ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, ++ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, ++ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, ++ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, ++ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, ++ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, ++ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, ++ PERF_SAMPLE_BRANCH_MAX_SHIFT = 17, ++}; ++ ++enum { ++ TSK_TRACE_FL_TRACE_BIT = 0, ++ TSK_TRACE_FL_GRAPH_BIT = 1, ++}; ++ ++enum { ++ TRACE_EVENT_FL_FILTERED_BIT = 0, ++ TRACE_EVENT_FL_CAP_ANY_BIT = 1, ++ TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2, ++ TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3, ++ TRACE_EVENT_FL_TRACEPOINT_BIT = 4, ++ TRACE_EVENT_FL_KPROBE_BIT = 5, ++ TRACE_EVENT_FL_UPROBE_BIT = 6, ++}; ++ ++enum { ++ EVENT_FILE_FL_ENABLED_BIT = 0, ++ EVENT_FILE_FL_RECORDED_CMD_BIT = 1, ++ EVENT_FILE_FL_RECORDED_TGID_BIT = 2, ++ EVENT_FILE_FL_FILTERED_BIT = 3, ++ EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, ++ EVENT_FILE_FL_SOFT_MODE_BIT = 5, ++ EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, ++ EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, ++ EVENT_FILE_FL_TRIGGER_COND_BIT = 8, ++ EVENT_FILE_FL_PID_FILTER_BIT = 9, ++ EVENT_FILE_FL_WAS_ENABLED_BIT = 10, ++}; ++ ++struct uuidcmp { ++ const char *uuid; ++ int len; ++}; ++ ++struct subprocess_info { ++ struct work_struct work; ++ struct completion *complete; ++ const char *path; ++ char **argv; ++ char **envp; ++ struct file *file; ++ int wait; ++ int retval; ++ pid_t pid; ++ int (*init)(struct subprocess_info *, struct cred *); ++ void (*cleanup)(struct subprocess_info *); ++ void *data; ++}; ++ ++enum { ++ TASKSTATS_CMD_UNSPEC = 0, ++ TASKSTATS_CMD_GET = 1, ++ TASKSTATS_CMD_NEW = 2, ++ __TASKSTATS_CMD_MAX = 3, ++}; ++ ++enum ucount_type { ++ UCOUNT_USER_NAMESPACES = 0, ++ UCOUNT_PID_NAMESPACES = 1, ++ UCOUNT_UTS_NAMESPACES = 2, ++ UCOUNT_IPC_NAMESPACES = 3, ++ UCOUNT_NET_NAMESPACES = 4, ++ UCOUNT_MNT_NAMESPACES = 5, ++ UCOUNT_CGROUP_NAMESPACES = 6, ++ UCOUNT_INOTIFY_INSTANCES = 7, ++ UCOUNT_INOTIFY_WATCHES = 8, ++ UCOUNT_COUNTS = 9, ++}; ++ ++enum { ++ HI_SOFTIRQ = 0, ++ TIMER_SOFTIRQ = 1, ++ NET_TX_SOFTIRQ = 2, ++ NET_RX_SOFTIRQ = 3, ++ BLOCK_SOFTIRQ = 4, ++ IRQ_POLL_SOFTIRQ = 5, ++ TASKLET_SOFTIRQ = 6, ++ SCHED_SOFTIRQ = 7, ++ HRTIMER_SOFTIRQ = 8, ++ RCU_SOFTIRQ = 9, ++ NR_SOFTIRQS = 10, ++}; ++ ++enum cpu_usage_stat { ++ CPUTIME_USER = 0, ++ CPUTIME_NICE = 1, ++ CPUTIME_SYSTEM = 2, ++ CPUTIME_SOFTIRQ = 3, ++ CPUTIME_IRQ = 4, ++ CPUTIME_IDLE = 5, ++ CPUTIME_IOWAIT = 6, ++ CPUTIME_STEAL = 7, ++ CPUTIME_GUEST = 8, ++ CPUTIME_GUEST_NICE = 9, ++ CPUTIME_SOFTIRQ_IDLE = 10, ++ CPUTIME_IRQ_IDLE = 11, ++ NR_STATS = 12, ++}; ++ ++enum cgroup_subsys_id { ++ cpuset_cgrp_id = 0, ++ cpu_cgrp_id = 1, ++ cpuacct_cgrp_id = 2, ++ io_cgrp_id = 3, ++ memory_cgrp_id = 4, ++ devices_cgrp_id = 5, ++ freezer_cgrp_id = 6, ++ net_cls_cgrp_id = 7, ++ perf_event_cgrp_id = 8, ++ net_prio_cgrp_id = 9, ++ hugetlb_cgrp_id = 10, ++ pids_cgrp_id = 11, ++ rdma_cgrp_id = 12, ++ files_cgrp_id = 13, ++ CGROUP_SUBSYS_COUNT = 14, ++}; ++ ++struct mdu_array_info_s 
{ ++ int major_version; ++ int minor_version; ++ int patch_version; ++ unsigned int ctime; ++ int level; ++ int size; ++ int nr_disks; ++ int raid_disks; ++ int md_minor; ++ int not_persistent; ++ unsigned int utime; ++ int state; ++ int active_disks; ++ int working_disks; ++ int failed_disks; ++ int spare_disks; ++ int layout; ++ int chunk_size; ++}; ++ ++typedef struct mdu_array_info_s mdu_array_info_t; ++ ++struct mdu_disk_info_s { ++ int number; ++ int major; ++ int minor; ++ int raid_disk; ++ int state; ++}; ++ ++typedef struct mdu_disk_info_s mdu_disk_info_t; ++ ++struct hash { ++ int ino; ++ int minor; ++ int major; ++ umode_t mode; ++ struct hash *next; ++ char name[4098]; ++}; ++ ++struct dir_entry { ++ struct list_head list; ++ char *name; ++ time64_t mtime; ++}; ++ ++enum state { ++ Start = 0, ++ Collect = 1, ++ GotHeader = 2, ++ SkipIt = 3, ++ GotName = 4, ++ CopyFile = 5, ++ GotSymlink = 6, ++ Reset = 7, ++}; ++ ++typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); ++ ++typedef u32 note_buf_t[106]; ++ ++typedef long unsigned int kimage_entry_t; ++ ++struct kexec_segment { ++ union { ++ void *buf; ++ void *kbuf; ++ }; ++ size_t bufsz; ++ long unsigned int mem; ++ size_t memsz; ++}; ++ ++struct kimage { ++ kimage_entry_t head; ++ kimage_entry_t *entry; ++ kimage_entry_t *last_entry; ++ long unsigned int start; ++ struct page *control_code_page; ++ struct page *swap_page; ++ void *vmcoreinfo_data_copy; ++ long unsigned int nr_segments; ++ struct kexec_segment segment[16]; ++ struct list_head control_pages; ++ struct list_head dest_pages; ++ struct list_head unusable_pages; ++ long unsigned int control_page; ++ unsigned int type: 1; ++ unsigned int preserve_context: 1; ++ unsigned int file_mode: 1; ++}; ++ ++enum flow_dissector_key_id { ++ FLOW_DISSECTOR_KEY_CONTROL = 0, ++ FLOW_DISSECTOR_KEY_BASIC = 1, ++ FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, ++ FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, ++ FLOW_DISSECTOR_KEY_PORTS = 4, ++ FLOW_DISSECTOR_KEY_ICMP = 5, ++ FLOW_DISSECTOR_KEY_ETH_ADDRS = 6, ++ FLOW_DISSECTOR_KEY_TIPC = 7, ++ FLOW_DISSECTOR_KEY_ARP = 8, ++ FLOW_DISSECTOR_KEY_VLAN = 9, ++ FLOW_DISSECTOR_KEY_FLOW_LABEL = 10, ++ FLOW_DISSECTOR_KEY_GRE_KEYID = 11, ++ FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 12, ++ FLOW_DISSECTOR_KEY_ENC_KEYID = 13, ++ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 14, ++ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 15, ++ FLOW_DISSECTOR_KEY_ENC_CONTROL = 16, ++ FLOW_DISSECTOR_KEY_ENC_PORTS = 17, ++ FLOW_DISSECTOR_KEY_MPLS = 18, ++ FLOW_DISSECTOR_KEY_TCP = 19, ++ FLOW_DISSECTOR_KEY_IP = 20, ++ FLOW_DISSECTOR_KEY_CVLAN = 21, ++ FLOW_DISSECTOR_KEY_ENC_IP = 22, ++ FLOW_DISSECTOR_KEY_ENC_OPTS = 23, ++ FLOW_DISSECTOR_KEY_MAX = 24, ++}; ++ ++enum { ++ ICMP_MIB_NUM = 0, ++ ICMP_MIB_INMSGS = 1, ++ ICMP_MIB_INERRORS = 2, ++ ICMP_MIB_INDESTUNREACHS = 3, ++ ICMP_MIB_INTIMEEXCDS = 4, ++ ICMP_MIB_INPARMPROBS = 5, ++ ICMP_MIB_INSRCQUENCHS = 6, ++ ICMP_MIB_INREDIRECTS = 7, ++ ICMP_MIB_INECHOS = 8, ++ ICMP_MIB_INECHOREPS = 9, ++ ICMP_MIB_INTIMESTAMPS = 10, ++ ICMP_MIB_INTIMESTAMPREPS = 11, ++ ICMP_MIB_INADDRMASKS = 12, ++ ICMP_MIB_INADDRMASKREPS = 13, ++ ICMP_MIB_OUTMSGS = 14, ++ ICMP_MIB_OUTERRORS = 15, ++ ICMP_MIB_OUTDESTUNREACHS = 16, ++ ICMP_MIB_OUTTIMEEXCDS = 17, ++ ICMP_MIB_OUTPARMPROBS = 18, ++ ICMP_MIB_OUTSRCQUENCHS = 19, ++ ICMP_MIB_OUTREDIRECTS = 20, ++ ICMP_MIB_OUTECHOS = 21, ++ ICMP_MIB_OUTECHOREPS = 22, ++ ICMP_MIB_OUTTIMESTAMPS = 23, ++ ICMP_MIB_OUTTIMESTAMPREPS 
= 24, ++ ICMP_MIB_OUTADDRMASKS = 25, ++ ICMP_MIB_OUTADDRMASKREPS = 26, ++ ICMP_MIB_CSUMERRORS = 27, ++ __ICMP_MIB_MAX = 28, ++}; ++ ++enum { ++ ICMP6_MIB_NUM = 0, ++ ICMP6_MIB_INMSGS = 1, ++ ICMP6_MIB_INERRORS = 2, ++ ICMP6_MIB_OUTMSGS = 3, ++ ICMP6_MIB_OUTERRORS = 4, ++ ICMP6_MIB_CSUMERRORS = 5, ++ __ICMP6_MIB_MAX = 6, ++}; ++ ++enum { ++ TCP_MIB_NUM = 0, ++ TCP_MIB_RTOALGORITHM = 1, ++ TCP_MIB_RTOMIN = 2, ++ TCP_MIB_RTOMAX = 3, ++ TCP_MIB_MAXCONN = 4, ++ TCP_MIB_ACTIVEOPENS = 5, ++ TCP_MIB_PASSIVEOPENS = 6, ++ TCP_MIB_ATTEMPTFAILS = 7, ++ TCP_MIB_ESTABRESETS = 8, ++ TCP_MIB_CURRESTAB = 9, ++ TCP_MIB_INSEGS = 10, ++ TCP_MIB_OUTSEGS = 11, ++ TCP_MIB_RETRANSSEGS = 12, ++ TCP_MIB_INERRS = 13, ++ TCP_MIB_OUTRSTS = 14, ++ TCP_MIB_CSUMERRORS = 15, ++ __TCP_MIB_MAX = 16, ++}; ++ ++enum { ++ UDP_MIB_NUM = 0, ++ UDP_MIB_INDATAGRAMS = 1, ++ UDP_MIB_NOPORTS = 2, ++ UDP_MIB_INERRORS = 3, ++ UDP_MIB_OUTDATAGRAMS = 4, ++ UDP_MIB_RCVBUFERRORS = 5, ++ UDP_MIB_SNDBUFERRORS = 6, ++ UDP_MIB_CSUMERRORS = 7, ++ UDP_MIB_IGNOREDMULTI = 8, ++ __UDP_MIB_MAX = 9, ++}; ++ ++enum { ++ LINUX_MIB_NUM = 0, ++ LINUX_MIB_SYNCOOKIESSENT = 1, ++ LINUX_MIB_SYNCOOKIESRECV = 2, ++ LINUX_MIB_SYNCOOKIESFAILED = 3, ++ LINUX_MIB_EMBRYONICRSTS = 4, ++ LINUX_MIB_PRUNECALLED = 5, ++ LINUX_MIB_RCVPRUNED = 6, ++ LINUX_MIB_OFOPRUNED = 7, ++ LINUX_MIB_OUTOFWINDOWICMPS = 8, ++ LINUX_MIB_LOCKDROPPEDICMPS = 9, ++ LINUX_MIB_ARPFILTER = 10, ++ LINUX_MIB_TIMEWAITED = 11, ++ LINUX_MIB_TIMEWAITRECYCLED = 12, ++ LINUX_MIB_TIMEWAITKILLED = 13, ++ LINUX_MIB_PAWSACTIVEREJECTED = 14, ++ LINUX_MIB_PAWSESTABREJECTED = 15, ++ LINUX_MIB_DELAYEDACKS = 16, ++ LINUX_MIB_DELAYEDACKLOCKED = 17, ++ LINUX_MIB_DELAYEDACKLOST = 18, ++ LINUX_MIB_LISTENOVERFLOWS = 19, ++ LINUX_MIB_LISTENDROPS = 20, ++ LINUX_MIB_TCPHPHITS = 21, ++ LINUX_MIB_TCPPUREACKS = 22, ++ LINUX_MIB_TCPHPACKS = 23, ++ LINUX_MIB_TCPRENORECOVERY = 24, ++ LINUX_MIB_TCPSACKRECOVERY = 25, ++ LINUX_MIB_TCPSACKRENEGING = 26, ++ LINUX_MIB_TCPSACKREORDER = 27, ++ LINUX_MIB_TCPRENOREORDER = 28, ++ LINUX_MIB_TCPTSREORDER = 29, ++ LINUX_MIB_TCPFULLUNDO = 30, ++ LINUX_MIB_TCPPARTIALUNDO = 31, ++ LINUX_MIB_TCPDSACKUNDO = 32, ++ LINUX_MIB_TCPLOSSUNDO = 33, ++ LINUX_MIB_TCPLOSTRETRANSMIT = 34, ++ LINUX_MIB_TCPRENOFAILURES = 35, ++ LINUX_MIB_TCPSACKFAILURES = 36, ++ LINUX_MIB_TCPLOSSFAILURES = 37, ++ LINUX_MIB_TCPFASTRETRANS = 38, ++ LINUX_MIB_TCPSLOWSTARTRETRANS = 39, ++ LINUX_MIB_TCPTIMEOUTS = 40, ++ LINUX_MIB_TCPLOSSPROBES = 41, ++ LINUX_MIB_TCPLOSSPROBERECOVERY = 42, ++ LINUX_MIB_TCPRENORECOVERYFAIL = 43, ++ LINUX_MIB_TCPSACKRECOVERYFAIL = 44, ++ LINUX_MIB_TCPRCVCOLLAPSED = 45, ++ LINUX_MIB_TCPDSACKOLDSENT = 46, ++ LINUX_MIB_TCPDSACKOFOSENT = 47, ++ LINUX_MIB_TCPDSACKRECV = 48, ++ LINUX_MIB_TCPDSACKOFORECV = 49, ++ LINUX_MIB_TCPABORTONDATA = 50, ++ LINUX_MIB_TCPABORTONCLOSE = 51, ++ LINUX_MIB_TCPABORTONMEMORY = 52, ++ LINUX_MIB_TCPABORTONTIMEOUT = 53, ++ LINUX_MIB_TCPABORTONLINGER = 54, ++ LINUX_MIB_TCPABORTFAILED = 55, ++ LINUX_MIB_TCPMEMORYPRESSURES = 56, ++ LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, ++ LINUX_MIB_TCPSACKDISCARD = 58, ++ LINUX_MIB_TCPDSACKIGNOREDOLD = 59, ++ LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, ++ LINUX_MIB_TCPSPURIOUSRTOS = 61, ++ LINUX_MIB_TCPMD5NOTFOUND = 62, ++ LINUX_MIB_TCPMD5UNEXPECTED = 63, ++ LINUX_MIB_TCPMD5FAILURE = 64, ++ LINUX_MIB_SACKSHIFTED = 65, ++ LINUX_MIB_SACKMERGED = 66, ++ LINUX_MIB_SACKSHIFTFALLBACK = 67, ++ LINUX_MIB_TCPBACKLOGDROP = 68, ++ LINUX_MIB_PFMEMALLOCDROP = 69, ++ LINUX_MIB_TCPMINTTLDROP = 70, ++ LINUX_MIB_TCPDEFERACCEPTDROP = 71, ++ LINUX_MIB_IPRPFILTER = 
72, ++ LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, ++ LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, ++ LINUX_MIB_TCPREQQFULLDROP = 75, ++ LINUX_MIB_TCPRETRANSFAIL = 76, ++ LINUX_MIB_TCPRCVCOALESCE = 77, ++ LINUX_MIB_TCPBACKLOGCOALESCE = 78, ++ LINUX_MIB_TCPOFOQUEUE = 79, ++ LINUX_MIB_TCPOFODROP = 80, ++ LINUX_MIB_TCPOFOMERGE = 81, ++ LINUX_MIB_TCPCHALLENGEACK = 82, ++ LINUX_MIB_TCPSYNCHALLENGE = 83, ++ LINUX_MIB_TCPFASTOPENACTIVE = 84, ++ LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, ++ LINUX_MIB_TCPFASTOPENPASSIVE = 86, ++ LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, ++ LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, ++ LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, ++ LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, ++ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, ++ LINUX_MIB_BUSYPOLLRXPACKETS = 92, ++ LINUX_MIB_TCPAUTOCORKING = 93, ++ LINUX_MIB_TCPFROMZEROWINDOWADV = 94, ++ LINUX_MIB_TCPTOZEROWINDOWADV = 95, ++ LINUX_MIB_TCPWANTZEROWINDOWADV = 96, ++ LINUX_MIB_TCPSYNRETRANS = 97, ++ LINUX_MIB_TCPORIGDATASENT = 98, ++ LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, ++ LINUX_MIB_TCPHYSTARTTRAINCWND = 100, ++ LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, ++ LINUX_MIB_TCPHYSTARTDELAYCWND = 102, ++ LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, ++ LINUX_MIB_TCPACKSKIPPEDPAWS = 104, ++ LINUX_MIB_TCPACKSKIPPEDSEQ = 105, ++ LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, ++ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, ++ LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, ++ LINUX_MIB_TCPWINPROBE = 109, ++ LINUX_MIB_TCPKEEPALIVE = 110, ++ LINUX_MIB_TCPMTUPFAIL = 111, ++ LINUX_MIB_TCPMTUPSUCCESS = 112, ++ LINUX_MIB_TCPDELIVERED = 113, ++ LINUX_MIB_TCPDELIVEREDCE = 114, ++ LINUX_MIB_TCPACKCOMPRESSED = 115, ++ LINUX_MIB_TCPZEROWINDOWDROP = 116, ++ LINUX_MIB_TCPRCVQDROP = 117, ++ LINUX_MIB_TCPWQUEUETOOBIG = 118, ++ __LINUX_MIB_MAX = 119, ++}; ++ ++enum { ++ LINUX_MIB_XFRMNUM = 0, ++ LINUX_MIB_XFRMINERROR = 1, ++ LINUX_MIB_XFRMINBUFFERERROR = 2, ++ LINUX_MIB_XFRMINHDRERROR = 3, ++ LINUX_MIB_XFRMINNOSTATES = 4, ++ LINUX_MIB_XFRMINSTATEPROTOERROR = 5, ++ LINUX_MIB_XFRMINSTATEMODEERROR = 6, ++ LINUX_MIB_XFRMINSTATESEQERROR = 7, ++ LINUX_MIB_XFRMINSTATEEXPIRED = 8, ++ LINUX_MIB_XFRMINSTATEMISMATCH = 9, ++ LINUX_MIB_XFRMINSTATEINVALID = 10, ++ LINUX_MIB_XFRMINTMPLMISMATCH = 11, ++ LINUX_MIB_XFRMINNOPOLS = 12, ++ LINUX_MIB_XFRMINPOLBLOCK = 13, ++ LINUX_MIB_XFRMINPOLERROR = 14, ++ LINUX_MIB_XFRMOUTERROR = 15, ++ LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, ++ LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, ++ LINUX_MIB_XFRMOUTNOSTATES = 18, ++ LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, ++ LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, ++ LINUX_MIB_XFRMOUTSTATESEQERROR = 21, ++ LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, ++ LINUX_MIB_XFRMOUTPOLBLOCK = 23, ++ LINUX_MIB_XFRMOUTPOLDEAD = 24, ++ LINUX_MIB_XFRMOUTPOLERROR = 25, ++ LINUX_MIB_XFRMFWDHDRERROR = 26, ++ LINUX_MIB_XFRMOUTSTATEINVALID = 27, ++ LINUX_MIB_XFRMACQUIREERROR = 28, ++ __LINUX_MIB_XFRMMAX = 29, ++}; ++ ++enum tcp_conntrack { ++ TCP_CONNTRACK_NONE = 0, ++ TCP_CONNTRACK_SYN_SENT = 1, ++ TCP_CONNTRACK_SYN_RECV = 2, ++ TCP_CONNTRACK_ESTABLISHED = 3, ++ TCP_CONNTRACK_FIN_WAIT = 4, ++ TCP_CONNTRACK_CLOSE_WAIT = 5, ++ TCP_CONNTRACK_LAST_ACK = 6, ++ TCP_CONNTRACK_TIME_WAIT = 7, ++ TCP_CONNTRACK_CLOSE = 8, ++ TCP_CONNTRACK_LISTEN = 9, ++ TCP_CONNTRACK_MAX = 10, ++ TCP_CONNTRACK_IGNORE = 11, ++ TCP_CONNTRACK_RETRANS = 12, ++ TCP_CONNTRACK_UNACK = 13, ++ TCP_CONNTRACK_TIMEOUT_MAX = 14, ++}; ++ ++enum ct_dccp_states { ++ CT_DCCP_NONE = 0, ++ CT_DCCP_REQUEST = 1, ++ CT_DCCP_RESPOND = 2, ++ CT_DCCP_PARTOPEN = 3, ++ CT_DCCP_OPEN = 4, ++ CT_DCCP_CLOSEREQ = 5, ++ CT_DCCP_CLOSING = 6, ++ CT_DCCP_TIMEWAIT = 
7, ++ CT_DCCP_IGNORE = 8, ++ CT_DCCP_INVALID = 9, ++ __CT_DCCP_MAX = 10, ++}; ++ ++enum udp_conntrack { ++ UDP_CT_UNREPLIED = 0, ++ UDP_CT_REPLIED = 1, ++ UDP_CT_MAX = 2, ++}; ++ ++enum { ++ XFRM_POLICY_IN = 0, ++ XFRM_POLICY_OUT = 1, ++ XFRM_POLICY_FWD = 2, ++ XFRM_POLICY_MASK = 3, ++ XFRM_POLICY_MAX = 3, ++}; ++ ++enum sched_tunable_scaling { ++ SCHED_TUNABLESCALING_NONE = 0, ++ SCHED_TUNABLESCALING_LOG = 1, ++ SCHED_TUNABLESCALING_LINEAR = 2, ++ SCHED_TUNABLESCALING_END = 3, ++}; ++ ++typedef long int (*syscall_fn_t)(const struct pt_regs *); ++ ++struct step_hook { ++ struct list_head node; ++ int (*fn)(struct pt_regs *, unsigned int); ++}; ++ ++struct break_hook { ++ struct list_head node; ++ u32 esr_val; ++ u32 esr_mask; ++ int (*fn)(struct pt_regs *, unsigned int); ++}; ++ ++enum dbg_active_el { ++ DBG_ACTIVE_EL0 = 0, ++ DBG_ACTIVE_EL1 = 1, ++}; ++ ++typedef u32 probe_opcode_t; ++ ++typedef void probes_handler_t(u32, long int, struct pt_regs *); ++ ++struct arch_probe_insn { ++ probe_opcode_t *insn; ++ pstate_check_t *pstate_cc; ++ probes_handler_t *handler; ++ long unsigned int restore; ++}; ++ ++typedef u32 kprobe_opcode_t; ++ ++struct arch_specific_insn { ++ struct arch_probe_insn api; ++}; ++ ++struct kprobe; ++ ++struct prev_kprobe { ++ struct kprobe *kp; ++ unsigned int status; ++}; ++ ++typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); ++ ++typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); ++ ++typedef int (*kprobe_fault_handler_t)(struct kprobe *, struct pt_regs *, int); ++ ++struct kprobe { ++ struct hlist_node hlist; ++ struct list_head list; ++ long unsigned int nmissed; ++ kprobe_opcode_t *addr; ++ const char *symbol_name; ++ unsigned int offset; ++ kprobe_pre_handler_t pre_handler; ++ kprobe_post_handler_t post_handler; ++ kprobe_fault_handler_t fault_handler; ++ kprobe_opcode_t opcode; ++ struct arch_specific_insn ainsn; ++ u32 flags; ++}; ++ ++struct kprobe_step_ctx { ++ long unsigned int ss_pending; ++ long unsigned int match_addr; ++}; ++ ++struct kprobe_ctlblk { ++ unsigned int kprobe_status; ++ long unsigned int saved_irqflag; ++ struct prev_kprobe prev_kprobe; ++ struct kprobe_step_ctx ss_ctx; ++}; ++ ++struct kretprobe_blackpoint { ++ const char *name; ++ void *addr; ++}; ++ ++struct kprobe_insn_cache { ++ struct mutex mutex; ++ void * (*alloc)(); ++ void (*free)(void *); ++ struct list_head pages; ++ size_t insn_size; ++ int nr_garbage; ++}; ++ ++enum irqchip_irq_state { ++ IRQCHIP_STATE_PENDING = 0, ++ IRQCHIP_STATE_ACTIVE = 1, ++ IRQCHIP_STATE_MASKED = 2, ++ IRQCHIP_STATE_LINE_LEVEL = 3, ++}; ++ ++struct irq_common_data; ++ ++struct irq_chip; ++ ++struct irq_data { ++ u32 mask; ++ unsigned int irq; ++ long unsigned int hwirq; ++ struct irq_common_data *common; ++ struct irq_chip *chip; ++ struct irq_domain *domain; ++ struct irq_data *parent_data; ++ void *chip_data; ++}; ++ ++struct msi_desc; ++ ++struct irq_common_data { ++ unsigned int state_use_accessors; ++ unsigned int node; ++ void *handler_data; ++ struct msi_desc *msi_desc; ++ cpumask_var_t affinity; ++ cpumask_var_t effective_affinity; ++}; ++ ++struct msi_msg; ++ ++struct irq_chip { ++ struct device *parent_device; ++ const char *name; ++ unsigned int (*irq_startup)(struct irq_data *); ++ void (*irq_shutdown)(struct irq_data *); ++ void (*irq_enable)(struct irq_data *); ++ void (*irq_disable)(struct irq_data *); ++ void (*irq_ack)(struct irq_data *); ++ void (*irq_mask)(struct irq_data *); ++ void (*irq_mask_ack)(struct irq_data *); ++ 
void (*irq_unmask)(struct irq_data *); ++ void (*irq_eoi)(struct irq_data *); ++ int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); ++ int (*irq_retrigger)(struct irq_data *); ++ int (*irq_set_type)(struct irq_data *, unsigned int); ++ int (*irq_set_wake)(struct irq_data *, unsigned int); ++ void (*irq_bus_lock)(struct irq_data *); ++ void (*irq_bus_sync_unlock)(struct irq_data *); ++ void (*irq_cpu_online)(struct irq_data *); ++ void (*irq_cpu_offline)(struct irq_data *); ++ void (*irq_suspend)(struct irq_data *); ++ void (*irq_resume)(struct irq_data *); ++ void (*irq_pm_shutdown)(struct irq_data *); ++ void (*irq_calc_mask)(struct irq_data *); ++ void (*irq_print_chip)(struct irq_data *, struct seq_file *); ++ int (*irq_request_resources)(struct irq_data *); ++ void (*irq_release_resources)(struct irq_data *); ++ void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); ++ void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); ++ int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); ++ int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); ++ int (*irq_set_vcpu_affinity)(struct irq_data *, void *); ++ void (*ipi_send_single)(struct irq_data *, unsigned int); ++ void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); ++ int (*irq_nmi_setup)(struct irq_data *); ++ void (*irq_nmi_teardown)(struct irq_data *); ++ long unsigned int flags; ++}; ++ ++struct midr_range { ++ u32 model; ++ u32 rv_min; ++ u32 rv_max; ++}; ++ ++struct arm64_midr_revidr { ++ u32 midr_rv; ++ u32 revidr_mask; ++}; ++ ++struct arm64_cpu_capabilities { ++ const char *desc; ++ u16 capability; ++ u16 type; ++ bool (*matches)(const struct arm64_cpu_capabilities *, int); ++ void (*cpu_enable)(const struct arm64_cpu_capabilities *); ++ union { ++ struct { ++ struct midr_range midr_range; ++ const struct arm64_midr_revidr * const fixed_revs; ++ }; ++ const struct midr_range *midr_range_list; ++ struct { ++ u32 sys_reg; ++ u8 field_pos; ++ u8 min_field_value; ++ u8 hwcap_type; ++ bool sign; ++ long unsigned int hwcap; ++ }; ++ const struct arm64_cpu_capabilities *match_list; ++ }; ++}; ++ ++enum cpu_pm_event { ++ CPU_PM_ENTER = 0, ++ CPU_PM_ENTER_FAILED = 1, ++ CPU_PM_EXIT = 2, ++ CPU_CLUSTER_PM_ENTER = 3, ++ CPU_CLUSTER_PM_ENTER_FAILED = 4, ++ CPU_CLUSTER_PM_EXIT = 5, ++}; ++ ++struct fpsimd_last_state_struct { ++ struct user_fpsimd_state *st; ++}; ++ ++struct plist_head { ++ struct list_head node_list; ++}; ++ ++enum pm_qos_type { ++ PM_QOS_UNITIALIZED = 0, ++ PM_QOS_MAX = 1, ++ PM_QOS_MIN = 2, ++ PM_QOS_SUM = 3, ++}; ++ ++struct pm_qos_constraints { ++ struct plist_head list; ++ s32 target_value; ++ s32 default_value; ++ s32 no_constraint_value; ++ enum pm_qos_type type; ++ struct blocking_notifier_head *notifiers; ++}; ++ ++struct pm_qos_flags { ++ struct list_head list; ++ s32 effective_flags; ++}; ++ ++struct dev_pm_qos_request; ++ ++struct dev_pm_qos { ++ struct pm_qos_constraints resume_latency; ++ struct pm_qos_constraints latency_tolerance; ++ struct pm_qos_flags flags; ++ struct dev_pm_qos_request *resume_latency_req; ++ struct dev_pm_qos_request *latency_tolerance_req; ++ struct dev_pm_qos_request *flags_req; ++}; ++ ++enum cpufreq_table_sorting { ++ CPUFREQ_TABLE_UNSORTED = 0, ++ CPUFREQ_TABLE_SORTED_ASCENDING = 1, ++ CPUFREQ_TABLE_SORTED_DESCENDING = 2, ++}; ++ ++struct cpufreq_cpuinfo { ++ unsigned int max_freq; ++ unsigned int min_freq; ++ unsigned int transition_latency; ++}; ++ ++struct cpufreq_user_policy { 
++ unsigned int min; ++ unsigned int max; ++}; ++ ++struct clk; ++ ++struct cpufreq_governor; ++ ++struct cpufreq_frequency_table; ++ ++struct cpufreq_stats; ++ ++struct cpufreq_policy { ++ cpumask_var_t cpus; ++ cpumask_var_t related_cpus; ++ cpumask_var_t real_cpus; ++ unsigned int shared_type; ++ unsigned int cpu; ++ struct clk *clk; ++ struct cpufreq_cpuinfo cpuinfo; ++ unsigned int min; ++ unsigned int max; ++ unsigned int cur; ++ unsigned int restore_freq; ++ unsigned int suspend_freq; ++ unsigned int policy; ++ unsigned int last_policy; ++ struct cpufreq_governor *governor; ++ void *governor_data; ++ char last_governor[16]; ++ struct work_struct update; ++ struct cpufreq_user_policy user_policy; ++ struct cpufreq_frequency_table *freq_table; ++ enum cpufreq_table_sorting freq_table_sorted; ++ struct list_head policy_list; ++ struct kobject kobj; ++ struct completion kobj_unregister; ++ struct rw_semaphore rwsem; ++ bool fast_switch_possible; ++ bool fast_switch_enabled; ++ unsigned int transition_delay_us; ++ bool dvfs_possible_from_any_cpu; ++ unsigned int cached_target_freq; ++ int cached_resolved_idx; ++ bool transition_ongoing; ++ spinlock_t transition_lock; ++ wait_queue_head_t transition_wait; ++ struct task_struct *transition_task; ++ struct cpufreq_stats *stats; ++ void *driver_data; ++}; ++ ++struct cpufreq_governor { ++ char name[16]; ++ int (*init)(struct cpufreq_policy *); ++ void (*exit)(struct cpufreq_policy *); ++ int (*start)(struct cpufreq_policy *); ++ void (*stop)(struct cpufreq_policy *); ++ void (*limits)(struct cpufreq_policy *); ++ ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); ++ int (*store_setspeed)(struct cpufreq_policy *, unsigned int); ++ bool dynamic_switching; ++ struct list_head governor_list; ++ struct module *owner; ++}; ++ ++struct cpufreq_frequency_table { ++ unsigned int flags; ++ unsigned int driver_data; ++ unsigned int frequency; ++}; ++ ++struct freq_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct cpufreq_policy *, char *); ++ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); ++}; ++ ++struct pm_qos_flags_request { ++ struct list_head node; ++ s32 flags; ++}; ++ ++enum dev_pm_qos_req_type { ++ DEV_PM_QOS_RESUME_LATENCY = 1, ++ DEV_PM_QOS_LATENCY_TOLERANCE = 2, ++ DEV_PM_QOS_FLAGS = 3, ++}; ++ ++struct dev_pm_qos_request { ++ enum dev_pm_qos_req_type type; ++ union { ++ struct plist_node pnode; ++ struct pm_qos_flags_request flr; ++ } data; ++ struct device *dev; ++}; ++ ++struct stackframe { ++ long unsigned int fp; ++ long unsigned int pc; ++ int graph; ++}; ++ ++struct intel_pqr_state { ++ u32 cur_rmid; ++ u32 cur_closid; ++ u32 default_rmid; ++ u32 default_closid; ++}; ++ ++enum mpam_enable_type { ++ enable_denied = 0, ++ enable_default = 1, ++ enable_acpi = 2, ++}; ++ ++struct user_sve_header { ++ __u32 size; ++ __u32 max_size; ++ __u16 vl; ++ __u16 max_vl; ++ __u16 flags; ++ __u16 __reserved; ++}; ++ ++struct seccomp_data { ++ int nr; ++ __u32 arch; ++ __u64 instruction_pointer; ++ __u64 args[6]; ++}; ++ ++typedef u32 compat_ulong_t; ++ ++enum perf_type_id { ++ PERF_TYPE_HARDWARE = 0, ++ PERF_TYPE_SOFTWARE = 1, ++ PERF_TYPE_TRACEPOINT = 2, ++ PERF_TYPE_HW_CACHE = 3, ++ PERF_TYPE_RAW = 4, ++ PERF_TYPE_BREAKPOINT = 5, ++ PERF_TYPE_MAX = 6, ++}; ++ ++enum { ++ HW_BREAKPOINT_LEN_1 = 1, ++ HW_BREAKPOINT_LEN_2 = 2, ++ HW_BREAKPOINT_LEN_3 = 3, ++ HW_BREAKPOINT_LEN_4 = 4, ++ HW_BREAKPOINT_LEN_5 = 5, ++ HW_BREAKPOINT_LEN_6 = 6, ++ HW_BREAKPOINT_LEN_7 = 7, ++ HW_BREAKPOINT_LEN_8 = 8, ++}; ++ ++enum { ++ 
HW_BREAKPOINT_EMPTY = 0, ++ HW_BREAKPOINT_R = 1, ++ HW_BREAKPOINT_W = 2, ++ HW_BREAKPOINT_RW = 3, ++ HW_BREAKPOINT_X = 4, ++ HW_BREAKPOINT_INVALID = 7, ++}; ++ ++enum bp_type_idx { ++ TYPE_INST = 0, ++ TYPE_DATA = 1, ++ TYPE_MAX = 2, ++}; ++ ++struct user_regset; ++ ++typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); ++ ++typedef int user_regset_get_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, void *, void *); ++ ++typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); ++ ++typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); ++ ++typedef unsigned int user_regset_get_size_fn(struct task_struct *, const struct user_regset *); ++ ++struct user_regset { ++ user_regset_get_fn *get; ++ user_regset_set_fn *set; ++ user_regset_active_fn *active; ++ user_regset_writeback_fn *writeback; ++ user_regset_get_size_fn *get_size; ++ unsigned int n; ++ unsigned int size; ++ unsigned int align; ++ unsigned int bias; ++ unsigned int core_note_type; ++}; ++ ++struct user_regset_view { ++ const char *name; ++ const struct user_regset *regsets; ++ unsigned int n; ++ u32 e_flags; ++ u16 e_machine; ++ u8 ei_osabi; ++}; ++ ++enum stack_type { ++ STACK_TYPE_UNKNOWN = 0, ++ STACK_TYPE_TASK = 1, ++ STACK_TYPE_IRQ = 2, ++ STACK_TYPE_OVERFLOW = 3, ++ STACK_TYPE_SDEI_NORMAL = 4, ++ STACK_TYPE_SDEI_CRITICAL = 5, ++}; ++ ++struct stack_info { ++ long unsigned int low; ++ long unsigned int high; ++ enum stack_type type; ++}; ++ ++struct trace_event_raw_sys_enter { ++ struct trace_entry ent; ++ long int id; ++ long unsigned int args[6]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sys_exit { ++ struct trace_entry ent; ++ long int id; ++ long int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_sys_enter {}; ++ ++struct trace_event_data_offsets_sys_exit {}; ++ ++struct pt_regs_offset { ++ const char *name; ++ int offset; ++}; ++ ++enum aarch64_regset { ++ REGSET_GPR = 0, ++ REGSET_FPR = 1, ++ REGSET_TLS = 2, ++ REGSET_HW_BREAK = 3, ++ REGSET_HW_WATCH = 4, ++ REGSET_SYSTEM_CALL = 5, ++ REGSET_SVE = 6, ++}; ++ ++enum compat_regset { ++ REGSET_COMPAT_GPR = 0, ++ REGSET_COMPAT_VFP = 1, ++}; ++ ++enum ptrace_syscall_dir { ++ PTRACE_SYSCALL_ENTER = 0, ++ PTRACE_SYSCALL_EXIT = 1, ++}; ++ ++enum meminit_context { ++ MEMINIT_EARLY = 0, ++ MEMINIT_HOTPLUG = 1, ++}; ++ ++struct cpu { ++ int node_id; ++ int hotpluggable; ++ struct device dev; ++}; ++ ++struct cpuinfo_arm64 { ++ struct cpu cpu; ++ struct kobject kobj; ++ u32 reg_ctr; ++ u32 reg_cntfrq; ++ u32 reg_dczid; ++ u32 reg_midr; ++ u32 reg_revidr; ++ u64 reg_id_aa64dfr0; ++ u64 reg_id_aa64dfr1; ++ u64 reg_id_aa64isar0; ++ u64 reg_id_aa64isar1; ++ u64 reg_id_aa64mmfr0; ++ u64 reg_id_aa64mmfr1; ++ u64 reg_id_aa64mmfr2; ++ u64 reg_id_aa64pfr0; ++ u64 reg_id_aa64pfr1; ++ u64 reg_id_aa64zfr0; ++ u32 reg_id_dfr0; ++ u32 reg_id_isar0; ++ u32 reg_id_isar1; ++ u32 reg_id_isar2; ++ u32 reg_id_isar3; ++ u32 reg_id_isar4; ++ u32 reg_id_isar5; ++ u32 reg_id_mmfr0; ++ u32 reg_id_mmfr1; ++ u32 reg_id_mmfr2; ++ u32 reg_id_mmfr3; ++ u32 reg_id_pfr0; ++ u32 reg_id_pfr1; ++ u32 reg_mvfr0; ++ u32 reg_mvfr1; ++ u32 reg_mvfr2; ++ u64 reg_zcr; ++}; ++ ++struct cpu_operations { ++ const char *name; ++ int (*cpu_init)(unsigned int); ++ int (*cpu_prepare)(unsigned int); ++ int (*cpu_boot)(unsigned int); ++ void (*cpu_postboot)(); ++ int (*cpu_disable)(unsigned int); ++ void 
(*cpu_die)(unsigned int); ++ int (*cpu_kill)(unsigned int); ++ int (*cpu_init_idle)(unsigned int); ++ int (*cpu_suspend)(long unsigned int); ++}; ++ ++struct sigcontext { ++ __u64 fault_address; ++ __u64 regs[31]; ++ __u64 sp; ++ __u64 pc; ++ __u64 pstate; ++ long: 64; ++ __u8 __reserved[4096]; ++}; ++ ++struct _aarch64_ctx { ++ __u32 magic; ++ __u32 size; ++}; ++ ++struct fpsimd_context { ++ struct _aarch64_ctx head; ++ __u32 fpsr; ++ __u32 fpcr; ++ __int128 unsigned vregs[32]; ++}; ++ ++struct esr_context { ++ struct _aarch64_ctx head; ++ __u64 esr; ++}; ++ ++struct extra_context { ++ struct _aarch64_ctx head; ++ __u64 datap; ++ __u32 size; ++ __u32 __reserved[3]; ++}; ++ ++struct sve_context { ++ struct _aarch64_ctx head; ++ __u16 vl; ++ __u16 __reserved[3]; ++}; ++ ++struct sigaltstack { ++ void *ss_sp; ++ int ss_flags; ++ size_t ss_size; ++}; ++ ++typedef struct sigaltstack stack_t; ++ ++struct ksignal { ++ struct k_sigaction ka; ++ siginfo_t info; ++ int sig; ++}; ++ ++struct syscall_metadata { ++ const char *name; ++ int syscall_nr; ++ int nb_args; ++ const char **types; ++ const char **args; ++ struct list_head enter_fields; ++ struct trace_event_call *enter_event; ++ struct trace_event_call *exit_event; ++}; ++ ++struct ucontext { ++ long unsigned int uc_flags; ++ struct ucontext *uc_link; ++ stack_t uc_stack; ++ sigset_t uc_sigmask; ++ __u8 __unused[120]; ++ long: 64; ++ struct sigcontext uc_mcontext; ++}; ++ ++struct rt_sigframe { ++ struct siginfo info; ++ struct ucontext uc; ++}; ++ ++struct frame_record; ++ ++struct rt_sigframe_user_layout { ++ void *sigframe; ++ struct frame_record *next_frame; ++ long unsigned int size; ++ long unsigned int limit; ++ long unsigned int fpsimd_offset; ++ long unsigned int esr_offset; ++ long unsigned int sve_offset; ++ long unsigned int extra_offset; ++ long unsigned int end_offset; ++}; ++ ++struct frame_record { ++ u64 fp; ++ u64 lr; ++}; ++ ++struct user_ctxs { ++ struct fpsimd_context *fpsimd; ++ struct sve_context *sve; ++}; ++ ++enum { ++ PER_LINUX = 0, ++ PER_LINUX_32BIT = 8388608, ++ PER_LINUX_FDPIC = 524288, ++ PER_SVR4 = 68157441, ++ PER_SVR3 = 83886082, ++ PER_SCOSVR3 = 117440515, ++ PER_OSR5 = 100663299, ++ PER_WYSEV386 = 83886084, ++ PER_ISCR4 = 67108869, ++ PER_BSD = 6, ++ PER_SUNOS = 67108870, ++ PER_XENIX = 83886087, ++ PER_LINUX32 = 8, ++ PER_LINUX32_3GB = 134217736, ++ PER_IRIX32 = 67108873, ++ PER_IRIXN32 = 67108874, ++ PER_IRIX64 = 67108875, ++ PER_RISCOS = 12, ++ PER_SOLARIS = 67108877, ++ PER_UW7 = 68157454, ++ PER_OSF4 = 15, ++ PER_HPUX = 16, ++ PER_MASK = 255, ++}; ++ ++struct stack_trace_data { ++ struct stack_trace *trace; ++ unsigned int no_sched_functions; ++ unsigned int skip; ++}; ++ ++typedef void (*clock_access_fn)(struct timespec64 *); ++ ++struct clk_hw; ++ ++struct clk_rate_request { ++ long unsigned int rate; ++ long unsigned int min_rate; ++ long unsigned int max_rate; ++ long unsigned int best_parent_rate; ++ struct clk_hw *best_parent_hw; ++}; ++ ++struct clk_core; ++ ++struct clk_init_data; ++ ++struct clk_hw { ++ struct clk_core *core; ++ struct clk *clk; ++ const struct clk_init_data *init; ++}; ++ ++struct clk_duty { ++ unsigned int num; ++ unsigned int den; ++}; ++ ++struct clk_ops { ++ int (*prepare)(struct clk_hw *); ++ void (*unprepare)(struct clk_hw *); ++ int (*is_prepared)(struct clk_hw *); ++ void (*unprepare_unused)(struct clk_hw *); ++ int (*enable)(struct clk_hw *); ++ void (*disable)(struct clk_hw *); ++ int (*is_enabled)(struct clk_hw *); ++ void (*disable_unused)(struct clk_hw *); ++ 
long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); ++ long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); ++ int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); ++ int (*set_parent)(struct clk_hw *, u8); ++ u8 (*get_parent)(struct clk_hw *); ++ int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); ++ int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); ++ long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); ++ int (*get_phase)(struct clk_hw *); ++ int (*set_phase)(struct clk_hw *, int); ++ int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); ++ int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); ++ void (*init)(struct clk_hw *); ++ void (*debug_init)(struct clk_hw *, struct dentry *); ++}; ++ ++struct clk_init_data { ++ const char *name; ++ const struct clk_ops *ops; ++ const char * const *parent_names; ++ u8 num_parents; ++ long unsigned int flags; ++}; ++ ++enum lockdep_ok { ++ LOCKDEP_STILL_OK = 0, ++ LOCKDEP_NOW_UNRELIABLE = 1, ++}; ++ ++enum bug_trap_type { ++ BUG_TRAP_TYPE_NONE = 0, ++ BUG_TRAP_TYPE_WARN = 1, ++ BUG_TRAP_TYPE_BUG = 2, ++}; ++ ++enum siginfo_layout { ++ SIL_KILL = 0, ++ SIL_TIMER = 1, ++ SIL_POLL = 2, ++ SIL_FAULT = 3, ++ SIL_FAULT_MCEERR = 4, ++ SIL_FAULT_BNDERR = 5, ++ SIL_FAULT_PKUERR = 6, ++ SIL_CHLD = 7, ++ SIL_RT = 8, ++ SIL_SYS = 9, ++}; ++ ++enum die_val { ++ DIE_UNUSED = 0, ++ DIE_OOPS = 1, ++}; ++ ++struct undef_hook { ++ struct list_head node; ++ u32 instr_mask; ++ u32 instr_val; ++ u64 pstate_mask; ++ u64 pstate_val; ++ int (*fn)(struct pt_regs *, u32); ++}; ++ ++struct sys64_hook { ++ unsigned int esr_mask; ++ unsigned int esr_val; ++ void (*handler)(unsigned int, struct pt_regs *); ++}; ++ ++struct arch_clocksource_data { ++ bool vdso_direct; ++ bool vdso_fix; ++}; ++ ++struct clocksource { ++ u64 (*read)(struct clocksource *); ++ u64 mask; ++ u32 mult; ++ u32 shift; ++ u64 max_idle_ns; ++ u32 maxadj; ++ struct arch_clocksource_data archdata; ++ u64 max_cycles; ++ const char *name; ++ struct list_head list; ++ int rating; ++ int (*enable)(struct clocksource *); ++ void (*disable)(struct clocksource *); ++ long unsigned int flags; ++ void (*suspend)(struct clocksource *); ++ void (*resume)(struct clocksource *); ++ void (*mark_unstable)(struct clocksource *); ++ void (*tick_stable)(struct clocksource *); ++ struct module *owner; ++}; ++ ++struct vm_special_mapping { ++ const char *name; ++ struct page **pages; ++ vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); ++ int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); ++}; ++ ++struct tk_read_base { ++ struct clocksource *clock; ++ u64 mask; ++ u64 cycle_last; ++ u32 mult; ++ u32 shift; ++ u64 xtime_nsec; ++ ktime_t base; ++ u64 base_real; ++}; ++ ++struct timekeeper { ++ struct tk_read_base tkr_mono; ++ struct tk_read_base tkr_raw; ++ u64 xtime_sec; ++ long unsigned int ktime_sec; ++ struct timespec64 wall_to_monotonic; ++ ktime_t offs_real; ++ ktime_t offs_boot; ++ ktime_t offs_tai; ++ s32 tai_offset; ++ unsigned int clock_was_set_seq; ++ u8 cs_was_changed_seq; ++ ktime_t next_leap_ktime; ++ u64 raw_sec; ++ u64 cycle_interval; ++ u64 xtime_interval; ++ s64 xtime_remainder; ++ u64 raw_interval; ++ u64 ntp_tick; ++ s64 ntp_error; ++ u32 ntp_error_shift; ++ u32 ntp_err_mult; ++ u32 skip_second_overflow; ++}; ++ ++struct vdso_data { ++ __u64 cs_cycle_last; ++ __u64 raw_time_sec; ++ __u64 
raw_time_nsec; ++ __u64 xtime_clock_sec; ++ __u64 xtime_clock_nsec; ++ __u64 xtime_coarse_sec; ++ __u64 xtime_coarse_nsec; ++ __u64 wtm_clock_sec; ++ __u64 wtm_clock_nsec; ++ __u32 tb_seq_count; ++ __u32 cs_mono_mult; ++ __u32 cs_shift; ++ __u32 cs_raw_mult; ++ __u32 tz_minuteswest; ++ __u32 tz_dsttime; ++ __u32 use_syscall; ++ __u32 hrtimer_res; ++ __u32 vdso_fix; ++}; ++ ++enum aarch64_insn_encoding_class { ++ AARCH64_INSN_CLS_UNKNOWN = 0, ++ AARCH64_INSN_CLS_DP_IMM = 1, ++ AARCH64_INSN_CLS_DP_REG = 2, ++ AARCH64_INSN_CLS_DP_FPSIMD = 3, ++ AARCH64_INSN_CLS_LDST = 4, ++ AARCH64_INSN_CLS_BR_SYS = 5, ++}; ++ ++enum aarch64_insn_hint_op { ++ AARCH64_INSN_HINT_NOP = 0, ++ AARCH64_INSN_HINT_YIELD = 32, ++ AARCH64_INSN_HINT_WFE = 64, ++ AARCH64_INSN_HINT_WFI = 96, ++ AARCH64_INSN_HINT_SEV = 128, ++ AARCH64_INSN_HINT_SEVL = 160, ++}; ++ ++enum aarch64_insn_imm_type { ++ AARCH64_INSN_IMM_ADR = 0, ++ AARCH64_INSN_IMM_26 = 1, ++ AARCH64_INSN_IMM_19 = 2, ++ AARCH64_INSN_IMM_16 = 3, ++ AARCH64_INSN_IMM_14 = 4, ++ AARCH64_INSN_IMM_12 = 5, ++ AARCH64_INSN_IMM_9 = 6, ++ AARCH64_INSN_IMM_7 = 7, ++ AARCH64_INSN_IMM_6 = 8, ++ AARCH64_INSN_IMM_S = 9, ++ AARCH64_INSN_IMM_R = 10, ++ AARCH64_INSN_IMM_N = 11, ++ AARCH64_INSN_IMM_MAX = 12, ++}; ++ ++enum aarch64_insn_register_type { ++ AARCH64_INSN_REGTYPE_RT = 0, ++ AARCH64_INSN_REGTYPE_RN = 1, ++ AARCH64_INSN_REGTYPE_RT2 = 2, ++ AARCH64_INSN_REGTYPE_RM = 3, ++ AARCH64_INSN_REGTYPE_RD = 4, ++ AARCH64_INSN_REGTYPE_RA = 5, ++ AARCH64_INSN_REGTYPE_RS = 6, ++}; ++ ++enum aarch64_insn_register { ++ AARCH64_INSN_REG_0 = 0, ++ AARCH64_INSN_REG_1 = 1, ++ AARCH64_INSN_REG_2 = 2, ++ AARCH64_INSN_REG_3 = 3, ++ AARCH64_INSN_REG_4 = 4, ++ AARCH64_INSN_REG_5 = 5, ++ AARCH64_INSN_REG_6 = 6, ++ AARCH64_INSN_REG_7 = 7, ++ AARCH64_INSN_REG_8 = 8, ++ AARCH64_INSN_REG_9 = 9, ++ AARCH64_INSN_REG_10 = 10, ++ AARCH64_INSN_REG_11 = 11, ++ AARCH64_INSN_REG_12 = 12, ++ AARCH64_INSN_REG_13 = 13, ++ AARCH64_INSN_REG_14 = 14, ++ AARCH64_INSN_REG_15 = 15, ++ AARCH64_INSN_REG_16 = 16, ++ AARCH64_INSN_REG_17 = 17, ++ AARCH64_INSN_REG_18 = 18, ++ AARCH64_INSN_REG_19 = 19, ++ AARCH64_INSN_REG_20 = 20, ++ AARCH64_INSN_REG_21 = 21, ++ AARCH64_INSN_REG_22 = 22, ++ AARCH64_INSN_REG_23 = 23, ++ AARCH64_INSN_REG_24 = 24, ++ AARCH64_INSN_REG_25 = 25, ++ AARCH64_INSN_REG_26 = 26, ++ AARCH64_INSN_REG_27 = 27, ++ AARCH64_INSN_REG_28 = 28, ++ AARCH64_INSN_REG_29 = 29, ++ AARCH64_INSN_REG_FP = 29, ++ AARCH64_INSN_REG_30 = 30, ++ AARCH64_INSN_REG_LR = 30, ++ AARCH64_INSN_REG_ZR = 31, ++ AARCH64_INSN_REG_SP = 31, ++}; ++ ++enum aarch64_insn_variant { ++ AARCH64_INSN_VARIANT_32BIT = 0, ++ AARCH64_INSN_VARIANT_64BIT = 1, ++}; ++ ++enum aarch64_insn_condition { ++ AARCH64_INSN_COND_EQ = 0, ++ AARCH64_INSN_COND_NE = 1, ++ AARCH64_INSN_COND_CS = 2, ++ AARCH64_INSN_COND_CC = 3, ++ AARCH64_INSN_COND_MI = 4, ++ AARCH64_INSN_COND_PL = 5, ++ AARCH64_INSN_COND_VS = 6, ++ AARCH64_INSN_COND_VC = 7, ++ AARCH64_INSN_COND_HI = 8, ++ AARCH64_INSN_COND_LS = 9, ++ AARCH64_INSN_COND_GE = 10, ++ AARCH64_INSN_COND_LT = 11, ++ AARCH64_INSN_COND_GT = 12, ++ AARCH64_INSN_COND_LE = 13, ++ AARCH64_INSN_COND_AL = 14, ++}; ++ ++enum aarch64_insn_branch_type { ++ AARCH64_INSN_BRANCH_NOLINK = 0, ++ AARCH64_INSN_BRANCH_LINK = 1, ++ AARCH64_INSN_BRANCH_RETURN = 2, ++ AARCH64_INSN_BRANCH_COMP_ZERO = 3, ++ AARCH64_INSN_BRANCH_COMP_NONZERO = 4, ++}; ++ ++enum aarch64_insn_size_type { ++ AARCH64_INSN_SIZE_8 = 0, ++ AARCH64_INSN_SIZE_16 = 1, ++ AARCH64_INSN_SIZE_32 = 2, ++ AARCH64_INSN_SIZE_64 = 3, ++}; ++ ++enum aarch64_insn_ldst_type { ++ 
AARCH64_INSN_LDST_LOAD_REG_OFFSET = 0, ++ AARCH64_INSN_LDST_STORE_REG_OFFSET = 1, ++ AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX = 2, ++ AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX = 3, ++ AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX = 4, ++ AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX = 5, ++ AARCH64_INSN_LDST_LOAD_EX = 6, ++ AARCH64_INSN_LDST_STORE_EX = 7, ++}; ++ ++enum aarch64_insn_adsb_type { ++ AARCH64_INSN_ADSB_ADD = 0, ++ AARCH64_INSN_ADSB_SUB = 1, ++ AARCH64_INSN_ADSB_ADD_SETFLAGS = 2, ++ AARCH64_INSN_ADSB_SUB_SETFLAGS = 3, ++}; ++ ++enum aarch64_insn_movewide_type { ++ AARCH64_INSN_MOVEWIDE_ZERO = 0, ++ AARCH64_INSN_MOVEWIDE_KEEP = 1, ++ AARCH64_INSN_MOVEWIDE_INVERSE = 2, ++}; ++ ++enum aarch64_insn_bitfield_type { ++ AARCH64_INSN_BITFIELD_MOVE = 0, ++ AARCH64_INSN_BITFIELD_MOVE_UNSIGNED = 1, ++ AARCH64_INSN_BITFIELD_MOVE_SIGNED = 2, ++}; ++ ++enum aarch64_insn_data1_type { ++ AARCH64_INSN_DATA1_REVERSE_16 = 0, ++ AARCH64_INSN_DATA1_REVERSE_32 = 1, ++ AARCH64_INSN_DATA1_REVERSE_64 = 2, ++}; ++ ++enum aarch64_insn_data2_type { ++ AARCH64_INSN_DATA2_UDIV = 0, ++ AARCH64_INSN_DATA2_SDIV = 1, ++ AARCH64_INSN_DATA2_LSLV = 2, ++ AARCH64_INSN_DATA2_LSRV = 3, ++ AARCH64_INSN_DATA2_ASRV = 4, ++ AARCH64_INSN_DATA2_RORV = 5, ++}; ++ ++enum aarch64_insn_data3_type { ++ AARCH64_INSN_DATA3_MADD = 0, ++ AARCH64_INSN_DATA3_MSUB = 1, ++}; ++ ++enum aarch64_insn_logic_type { ++ AARCH64_INSN_LOGIC_AND = 0, ++ AARCH64_INSN_LOGIC_BIC = 1, ++ AARCH64_INSN_LOGIC_ORR = 2, ++ AARCH64_INSN_LOGIC_ORN = 3, ++ AARCH64_INSN_LOGIC_EOR = 4, ++ AARCH64_INSN_LOGIC_EON = 5, ++ AARCH64_INSN_LOGIC_AND_SETFLAGS = 6, ++ AARCH64_INSN_LOGIC_BIC_SETFLAGS = 7, ++}; ++ ++enum aarch64_insn_prfm_type { ++ AARCH64_INSN_PRFM_TYPE_PLD = 0, ++ AARCH64_INSN_PRFM_TYPE_PLI = 1, ++ AARCH64_INSN_PRFM_TYPE_PST = 2, ++}; ++ ++enum aarch64_insn_prfm_target { ++ AARCH64_INSN_PRFM_TARGET_L1 = 0, ++ AARCH64_INSN_PRFM_TARGET_L2 = 1, ++ AARCH64_INSN_PRFM_TARGET_L3 = 2, ++}; ++ ++enum aarch64_insn_prfm_policy { ++ AARCH64_INSN_PRFM_POLICY_KEEP = 0, ++ AARCH64_INSN_PRFM_POLICY_STRM = 1, ++}; ++ ++enum fixed_addresses { ++ FIX_HOLE = 0, ++ FIX_FDT_END = 1, ++ FIX_FDT = 64, ++ FIX_EARLYCON_MEM_BASE = 65, ++ FIX_TEXT_POKE0 = 66, ++ FIX_APEI_GHES_IRQ = 67, ++ FIX_APEI_GHES_NMI = 68, ++ FIX_ENTRY_TRAMP_DATA = 69, ++ FIX_ENTRY_TRAMP_TEXT = 70, ++ __end_of_permanent_fixed_addresses = 71, ++ FIX_BTMAP_END = 71, ++ FIX_BTMAP_BEGIN = 98, ++ FIX_PTE = 99, ++ FIX_PMD = 100, ++ FIX_PUD = 101, ++ FIX_PGD = 102, ++ __end_of_fixed_addresses = 103, ++}; ++ ++struct aarch64_insn_patch { ++ void **text_addrs; ++ u32 *new_insns; ++ int insn_cnt; ++ atomic_t cpu_count; ++}; ++ ++struct return_address_data { ++ unsigned int level; ++ void *addr; ++}; ++ ++struct arm_smccc_res { ++ long unsigned int a0; ++ long unsigned int a1; ++ long unsigned int a2; ++ long unsigned int a3; ++}; ++ ++struct alt_instr { ++ s32 orig_offset; ++ s32 alt_offset; ++ u16 cpufeature; ++ u8 orig_len; ++ u8 alt_len; ++}; ++ ++struct device_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct device *, struct device_attribute *, char *); ++ ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); ++}; ++ ++struct ssbd_options { ++ const char *str; ++ int state; ++}; ++ ++enum { ++ CAP_HWCAP = 1, ++ CAP_COMPAT_HWCAP = 2, ++ CAP_COMPAT_HWCAP2 = 3, ++}; ++ ++struct __ftr_reg_entry { ++ u32 sys_id; ++ struct arm64_ftr_reg *reg; ++}; ++ ++typedef void kpti_remap_fn(int, int, phys_addr_t); ++ ++typedef void (*alternative_cb_t)(struct alt_instr *, __le32 *, __le32 *, int); ++ 
++struct alt_region { ++ struct alt_instr *begin; ++ struct alt_instr *end; ++}; ++ ++enum cache_type { ++ CACHE_TYPE_NOCACHE = 0, ++ CACHE_TYPE_INST = 1, ++ CACHE_TYPE_DATA = 2, ++ CACHE_TYPE_SEPARATE = 3, ++ CACHE_TYPE_UNIFIED = 4, ++}; ++ ++struct cacheinfo { ++ unsigned int id; ++ enum cache_type type; ++ unsigned int level; ++ unsigned int coherency_line_size; ++ unsigned int number_of_sets; ++ unsigned int ways_of_associativity; ++ unsigned int physical_line_partition; ++ unsigned int size; ++ cpumask_t shared_cpu_map; ++ unsigned int attributes; ++ void *fw_token; ++ bool disable_sysfs; ++ void *priv; ++}; ++ ++struct cpu_cacheinfo { ++ struct cacheinfo *info_list; ++ unsigned int num_levels; ++ unsigned int num_leaves; ++ bool cpu_map_populated; ++}; ++ ++typedef u64 acpi_size; ++ ++struct acpi_subtable_header { ++ u8 type; ++ u8 length; ++}; ++ ++enum acpi_madt_type { ++ ACPI_MADT_TYPE_LOCAL_APIC = 0, ++ ACPI_MADT_TYPE_IO_APIC = 1, ++ ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, ++ ACPI_MADT_TYPE_NMI_SOURCE = 3, ++ ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, ++ ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, ++ ACPI_MADT_TYPE_IO_SAPIC = 6, ++ ACPI_MADT_TYPE_LOCAL_SAPIC = 7, ++ ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, ++ ACPI_MADT_TYPE_LOCAL_X2APIC = 9, ++ ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, ++ ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, ++ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, ++ ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ++ ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ++ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, ++ ACPI_MADT_TYPE_RESERVED = 16, ++}; ++ ++struct acpi_madt_generic_interrupt { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 cpu_interface_number; ++ u32 uid; ++ u32 flags; ++ u32 parking_version; ++ u32 performance_interrupt; ++ u64 parked_address; ++ u64 base_address; ++ u64 gicv_base_address; ++ u64 gich_base_address; ++ u32 vgic_interrupt; ++ u64 gicr_base_address; ++ u64 arm_mpidr; ++ u8 efficiency_class; ++ u8 reserved2[1]; ++ u16 spe_interrupt; ++} __attribute__((packed)); ++ ++enum perf_hw_id { ++ PERF_COUNT_HW_CPU_CYCLES = 0, ++ PERF_COUNT_HW_INSTRUCTIONS = 1, ++ PERF_COUNT_HW_CACHE_REFERENCES = 2, ++ PERF_COUNT_HW_CACHE_MISSES = 3, ++ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, ++ PERF_COUNT_HW_BRANCH_MISSES = 5, ++ PERF_COUNT_HW_BUS_CYCLES = 6, ++ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, ++ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, ++ PERF_COUNT_HW_REF_CPU_CYCLES = 9, ++ PERF_COUNT_HW_MAX = 10, ++}; ++ ++enum perf_hw_cache_id { ++ PERF_COUNT_HW_CACHE_L1D = 0, ++ PERF_COUNT_HW_CACHE_L1I = 1, ++ PERF_COUNT_HW_CACHE_LL = 2, ++ PERF_COUNT_HW_CACHE_DTLB = 3, ++ PERF_COUNT_HW_CACHE_ITLB = 4, ++ PERF_COUNT_HW_CACHE_BPU = 5, ++ PERF_COUNT_HW_CACHE_NODE = 6, ++ PERF_COUNT_HW_CACHE_MAX = 7, ++}; ++ ++enum perf_hw_cache_op_id { ++ PERF_COUNT_HW_CACHE_OP_READ = 0, ++ PERF_COUNT_HW_CACHE_OP_WRITE = 1, ++ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, ++ PERF_COUNT_HW_CACHE_OP_MAX = 3, ++}; ++ ++enum perf_hw_cache_op_result_id { ++ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, ++ PERF_COUNT_HW_CACHE_RESULT_MISS = 1, ++ PERF_COUNT_HW_CACHE_RESULT_MAX = 2, ++}; ++ ++enum armpmu_attr_groups { ++ ARMPMU_ATTR_GROUP_COMMON = 0, ++ ARMPMU_ATTR_GROUP_EVENTS = 1, ++ ARMPMU_ATTR_GROUP_FORMATS = 2, ++ ARMPMU_NR_ATTR_GROUPS = 3, ++}; ++ ++struct trace_event_raw_ipi_raise { ++ struct trace_entry ent; ++ u32 __data_loc_target_cpus; ++ const char *reason; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_ipi_handler { ++ struct trace_entry ent; ++ const char *reason; ++ char __data[0]; ++}; ++ ++struct 
trace_event_data_offsets_ipi_raise { ++ u32 target_cpus; ++}; ++ ++struct trace_event_data_offsets_ipi_handler {}; ++ ++enum ipi_msg_type { ++ IPI_RESCHEDULE = 0, ++ IPI_CALL_FUNC = 1, ++ IPI_CPU_STOP = 2, ++ IPI_CPU_CRASH_STOP = 3, ++ IPI_TIMER = 4, ++ IPI_IRQ_WORK = 5, ++ IPI_WAKEUP = 6, ++ IPI_CPU_BACKTRACE = 7, ++}; ++ ++typedef __u64 __le64; ++ ++typedef u32 compat_size_t; ++ ++typedef s32 compat_clock_t; ++ ++typedef s32 compat_pid_t; ++ ++typedef u32 __compat_uid32_t; ++ ++typedef s32 compat_timer_t; ++ ++typedef s32 compat_int_t; ++ ++typedef u64 compat_u64; ++ ++typedef u32 compat_sigset_word; ++ ++struct compat_sigaltstack { ++ compat_uptr_t ss_sp; ++ int ss_flags; ++ compat_size_t ss_size; ++}; ++ ++typedef struct compat_sigaltstack compat_stack_t; ++ ++typedef struct { ++ compat_sigset_word sig[2]; ++} compat_sigset_t; ++ ++union compat_sigval { ++ compat_int_t sival_int; ++ compat_uptr_t sival_ptr; ++}; ++ ++typedef union compat_sigval compat_sigval_t; ++ ++struct compat_siginfo { ++ int si_signo; ++ int si_errno; ++ int si_code; ++ union { ++ int _pad[29]; ++ struct { ++ compat_pid_t _pid; ++ __compat_uid32_t _uid; ++ } _kill; ++ struct { ++ compat_timer_t _tid; ++ int _overrun; ++ compat_sigval_t _sigval; ++ } _timer; ++ struct { ++ compat_pid_t _pid; ++ __compat_uid32_t _uid; ++ compat_sigval_t _sigval; ++ } _rt; ++ struct { ++ compat_pid_t _pid; ++ __compat_uid32_t _uid; ++ int _status; ++ compat_clock_t _utime; ++ compat_clock_t _stime; ++ } _sigchld; ++ struct { ++ compat_uptr_t _addr; ++ union { ++ short int _addr_lsb; ++ struct { ++ char _dummy_bnd[4]; ++ compat_uptr_t _lower; ++ compat_uptr_t _upper; ++ } _addr_bnd; ++ struct { ++ char _dummy_pkey[4]; ++ u32 _pkey; ++ } _addr_pkey; ++ }; ++ } _sigfault; ++ struct { ++ compat_long_t _band; ++ int _fd; ++ } _sigpoll; ++ struct { ++ compat_uptr_t _call_addr; ++ int _syscall; ++ unsigned int _arch; ++ } _sigsys; ++ } _sifields; ++}; ++ ++struct a32_sigcontext { ++ compat_ulong_t trap_no; ++ compat_ulong_t error_code; ++ compat_ulong_t oldmask; ++ compat_ulong_t arm_r0; ++ compat_ulong_t arm_r1; ++ compat_ulong_t arm_r2; ++ compat_ulong_t arm_r3; ++ compat_ulong_t arm_r4; ++ compat_ulong_t arm_r5; ++ compat_ulong_t arm_r6; ++ compat_ulong_t arm_r7; ++ compat_ulong_t arm_r8; ++ compat_ulong_t arm_r9; ++ compat_ulong_t arm_r10; ++ compat_ulong_t arm_fp; ++ compat_ulong_t arm_ip; ++ compat_ulong_t arm_sp; ++ compat_ulong_t arm_lr; ++ compat_ulong_t arm_pc; ++ compat_ulong_t arm_cpsr; ++ compat_ulong_t fault_address; ++}; ++ ++struct a32_ucontext { ++ compat_ulong_t uc_flags; ++ compat_uptr_t uc_link; ++ compat_stack_t uc_stack; ++ struct a32_sigcontext uc_mcontext; ++ compat_sigset_t uc_sigmask; ++ int __unused[30]; ++ compat_ulong_t uc_regspace[128]; ++}; ++ ++struct compat_user_vfp { ++ compat_u64 fpregs[32]; ++ compat_ulong_t fpscr; ++}; ++ ++struct compat_user_vfp_exc { ++ compat_ulong_t fpexc; ++ compat_ulong_t fpinst; ++ compat_ulong_t fpinst2; ++}; ++ ++struct a32_vfp_sigframe { ++ compat_ulong_t magic; ++ compat_ulong_t size; ++ struct compat_user_vfp ufp; ++ struct compat_user_vfp_exc ufp_exc; ++}; ++ ++struct a32_aux_sigframe { ++ struct a32_vfp_sigframe vfp; ++ long unsigned int end_magic; ++}; ++ ++struct a32_sigframe { ++ struct a32_ucontext uc; ++ compat_ulong_t retcode[2]; ++}; ++ ++struct a32_rt_sigframe { ++ struct compat_siginfo info; ++ struct a32_sigframe sig; ++}; ++ ++union __fpsimd_vreg { ++ __int128 unsigned raw; ++ struct { ++ u64 lo; ++ u64 hi; ++ }; ++}; ++ ++typedef short unsigned int 
__kernel_old_uid_t; ++ ++typedef short unsigned int __kernel_old_gid_t; ++ ++typedef __kernel_long_t __kernel_suseconds_t; ++ ++typedef __kernel_old_uid_t old_uid_t; ++ ++typedef __kernel_old_gid_t old_gid_t; ++ ++typedef void (*exitcall_t)(); ++ ++typedef unsigned int compat_elf_greg_t; ++ ++typedef compat_elf_greg_t compat_elf_gregset_t[18]; ++ ++typedef __u32 Elf32_Addr; ++ ++typedef __u16 Elf32_Half; ++ ++typedef __u32 Elf32_Off; ++ ++struct elf32_hdr { ++ unsigned char e_ident[16]; ++ Elf32_Half e_type; ++ Elf32_Half e_machine; ++ Elf32_Word e_version; ++ Elf32_Addr e_entry; ++ Elf32_Off e_phoff; ++ Elf32_Off e_shoff; ++ Elf32_Word e_flags; ++ Elf32_Half e_ehsize; ++ Elf32_Half e_phentsize; ++ Elf32_Half e_phnum; ++ Elf32_Half e_shentsize; ++ Elf32_Half e_shnum; ++ Elf32_Half e_shstrndx; ++}; ++ ++struct elf32_phdr { ++ Elf32_Word p_type; ++ Elf32_Off p_offset; ++ Elf32_Addr p_vaddr; ++ Elf32_Addr p_paddr; ++ Elf32_Word p_filesz; ++ Elf32_Word p_memsz; ++ Elf32_Word p_flags; ++ Elf32_Word p_align; ++}; ++ ++struct elf32_shdr { ++ Elf32_Word sh_name; ++ Elf32_Word sh_type; ++ Elf32_Word sh_flags; ++ Elf32_Addr sh_addr; ++ Elf32_Off sh_offset; ++ Elf32_Word sh_size; ++ Elf32_Word sh_link; ++ Elf32_Word sh_info; ++ Elf32_Word sh_addralign; ++ Elf32_Word sh_entsize; ++}; ++ ++struct timeval { ++ __kernel_time_t tv_sec; ++ __kernel_suseconds_t tv_usec; ++}; ++ ++struct compat_timeval { ++ compat_time_t tv_sec; ++ s32 tv_usec; ++}; ++ ++typedef u16 __compat_uid_t; ++ ++typedef u16 __compat_gid_t; ++ ++typedef struct compat_siginfo compat_siginfo_t; ++ ++struct compat_elf_siginfo { ++ compat_int_t si_signo; ++ compat_int_t si_code; ++ compat_int_t si_errno; ++}; ++ ++struct compat_elf_prstatus { ++ struct compat_elf_siginfo pr_info; ++ short int pr_cursig; ++ compat_ulong_t pr_sigpend; ++ compat_ulong_t pr_sighold; ++ compat_pid_t pr_pid; ++ compat_pid_t pr_ppid; ++ compat_pid_t pr_pgrp; ++ compat_pid_t pr_sid; ++ struct compat_timeval pr_utime; ++ struct compat_timeval pr_stime; ++ struct compat_timeval pr_cutime; ++ struct compat_timeval pr_cstime; ++ compat_elf_gregset_t pr_reg; ++ compat_int_t pr_fpvalid; ++}; ++ ++struct compat_elf_prpsinfo { ++ char pr_state; ++ char pr_sname; ++ char pr_zomb; ++ char pr_nice; ++ compat_ulong_t pr_flag; ++ __compat_uid_t pr_uid; ++ __compat_gid_t pr_gid; ++ compat_pid_t pr_pid; ++ compat_pid_t pr_ppid; ++ compat_pid_t pr_pgrp; ++ compat_pid_t pr_sid; ++ char pr_fname[16]; ++ char pr_psargs[80]; ++}; ++ ++struct arch_elf_state {}; ++ ++struct memelfnote { ++ const char *name; ++ int type; ++ unsigned int datasz; ++ void *data; ++}; ++ ++struct elf_thread_core_info { ++ struct elf_thread_core_info *next; ++ struct task_struct *task; ++ struct compat_elf_prstatus prstatus; ++ struct memelfnote notes[0]; ++}; ++ ++struct elf_note_info { ++ struct elf_thread_core_info *thread; ++ struct memelfnote psinfo; ++ struct memelfnote signote; ++ struct memelfnote auxv; ++ struct memelfnote files; ++ compat_siginfo_t csigdata; ++ size_t size; ++ int thread_notes; ++}; ++ ++struct compat_statfs64; ++ ++struct kobject___2; ++ ++struct dyn_arch_ftrace {}; ++ ++struct dyn_ftrace { ++ long unsigned int ip; ++ long unsigned int flags; ++ struct dyn_arch_ftrace arch; ++}; ++ ++struct elf64_rela { ++ Elf64_Addr r_offset; ++ Elf64_Xword r_info; ++ Elf64_Sxword r_addend; ++}; ++ ++typedef struct elf64_rela Elf64_Rela; ++ ++enum aarch64_reloc_op { ++ RELOC_OP_NONE = 0, ++ RELOC_OP_ABS = 1, ++ RELOC_OP_PREL = 2, ++ RELOC_OP_PAGE = 3, ++}; ++ ++enum aarch64_insn_movw_imm_type { 
++ AARCH64_INSN_IMM_MOVNZ = 0, ++ AARCH64_INSN_IMM_MOVKZ = 1, ++}; ++ ++enum perf_sample_regs_abi { ++ PERF_SAMPLE_REGS_ABI_NONE = 0, ++ PERF_SAMPLE_REGS_ABI_32 = 1, ++ PERF_SAMPLE_REGS_ABI_64 = 2, ++}; ++ ++enum perf_event_arm_regs { ++ PERF_REG_ARM64_X0 = 0, ++ PERF_REG_ARM64_X1 = 1, ++ PERF_REG_ARM64_X2 = 2, ++ PERF_REG_ARM64_X3 = 3, ++ PERF_REG_ARM64_X4 = 4, ++ PERF_REG_ARM64_X5 = 5, ++ PERF_REG_ARM64_X6 = 6, ++ PERF_REG_ARM64_X7 = 7, ++ PERF_REG_ARM64_X8 = 8, ++ PERF_REG_ARM64_X9 = 9, ++ PERF_REG_ARM64_X10 = 10, ++ PERF_REG_ARM64_X11 = 11, ++ PERF_REG_ARM64_X12 = 12, ++ PERF_REG_ARM64_X13 = 13, ++ PERF_REG_ARM64_X14 = 14, ++ PERF_REG_ARM64_X15 = 15, ++ PERF_REG_ARM64_X16 = 16, ++ PERF_REG_ARM64_X17 = 17, ++ PERF_REG_ARM64_X18 = 18, ++ PERF_REG_ARM64_X19 = 19, ++ PERF_REG_ARM64_X20 = 20, ++ PERF_REG_ARM64_X21 = 21, ++ PERF_REG_ARM64_X22 = 22, ++ PERF_REG_ARM64_X23 = 23, ++ PERF_REG_ARM64_X24 = 24, ++ PERF_REG_ARM64_X25 = 25, ++ PERF_REG_ARM64_X26 = 26, ++ PERF_REG_ARM64_X27 = 27, ++ PERF_REG_ARM64_X28 = 28, ++ PERF_REG_ARM64_X29 = 29, ++ PERF_REG_ARM64_LR = 30, ++ PERF_REG_ARM64_SP = 31, ++ PERF_REG_ARM64_PC = 32, ++ PERF_REG_ARM64_MAX = 33, ++}; ++ ++struct perf_callchain_entry_ctx { ++ struct perf_callchain_entry *entry; ++ u32 max_stack; ++ u32 nr; ++ short int contexts; ++ bool contexts_maxed; ++}; ++ ++struct frame_tail { ++ struct frame_tail *fp; ++ long unsigned int lr; ++}; ++ ++struct a32_frame_tail { ++ compat_uptr_t fp; ++ u32 sp; ++ u32 lr; ++}; ++ ++typedef long unsigned int ulong; ++ ++typedef long unsigned int irq_hw_number_t; ++ ++struct preempt_notifier; ++ ++struct preempt_ops { ++ void (*sched_in)(struct preempt_notifier *, int); ++ void (*sched_out)(struct preempt_notifier *, struct task_struct *); ++}; ++ ++struct preempt_notifier { ++ struct hlist_node link; ++ struct preempt_ops *ops; ++}; ++ ++struct pdev_archdata {}; ++ ++struct platform_device_id { ++ char name[20]; ++ kernel_ulong_t driver_data; ++}; ++ ++struct mmu_notifier_mm { ++ struct hlist_head list; ++ spinlock_t lock; ++}; ++ ++enum irqreturn { ++ IRQ_NONE = 0, ++ IRQ_HANDLED = 1, ++ IRQ_WAKE_THREAD = 2, ++}; ++ ++typedef enum irqreturn irqreturn_t; ++ ++struct mmu_notifier; ++ ++struct mmu_notifier_ops { ++ int flags; ++ void (*release)(struct mmu_notifier *, struct mm_struct *); ++ int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); ++ int (*clear_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); ++ int (*test_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int); ++ void (*change_pte)(struct mmu_notifier *, struct mm_struct *, long unsigned int, pte_t); ++ int (*invalidate_range_start)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int, bool); ++ void (*invalidate_range_end)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); ++ void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct mmu_notifier { ++ struct hlist_node hlist; ++ const struct mmu_notifier_ops *ops; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct swait_queue_head { ++ raw_spinlock_t lock; ++ struct list_head task_list; ++}; ++ ++struct kvm_regs { ++ struct user_pt_regs regs; ++ __u64 sp_el1; ++ __u64 elr_el1; ++ 
__u64 spsr[5]; ++ long: 64; ++ struct user_fpsimd_state fp_regs; ++}; ++ ++struct kvm_guest_debug_arch { ++ __u64 dbg_bcr[16]; ++ __u64 dbg_bvr[16]; ++ __u64 dbg_wcr[16]; ++ __u64 dbg_wvr[16]; ++}; ++ ++struct kvm_debug_exit_arch { ++ __u32 hsr; ++ __u64 far; ++}; ++ ++struct kvm_sync_regs { ++ __u64 device_irq_level; ++}; ++ ++struct kvm_arch_memory_slot {}; ++ ++struct kvm_irq_level { ++ union { ++ __u32 irq; ++ __s32 status; ++ }; ++ __u32 level; ++}; ++ ++struct kvm_hyperv_exit { ++ __u32 type; ++ union { ++ struct { ++ __u32 msr; ++ __u64 control; ++ __u64 evt_page; ++ __u64 msg_page; ++ } synic; ++ struct { ++ __u64 input; ++ __u64 result; ++ __u64 params[2]; ++ } hcall; ++ } u; ++}; ++ ++struct kvm_run { ++ __u8 request_interrupt_window; ++ __u8 immediate_exit; ++ __u8 padding1[6]; ++ __u32 exit_reason; ++ __u8 ready_for_interrupt_injection; ++ __u8 if_flag; ++ __u16 flags; ++ __u64 cr8; ++ __u64 apic_base; ++ union { ++ struct { ++ __u64 hardware_exit_reason; ++ } hw; ++ struct { ++ __u64 hardware_entry_failure_reason; ++ } fail_entry; ++ struct { ++ __u32 exception; ++ __u32 error_code; ++ } ex; ++ struct { ++ __u8 direction; ++ __u8 size; ++ __u16 port; ++ __u32 count; ++ __u64 data_offset; ++ } io; ++ struct { ++ struct kvm_debug_exit_arch arch; ++ } debug; ++ struct { ++ __u64 phys_addr; ++ __u8 data[8]; ++ __u32 len; ++ __u8 is_write; ++ } mmio; ++ struct { ++ __u64 nr; ++ __u64 args[6]; ++ __u64 ret; ++ __u32 longmode; ++ __u32 pad; ++ } hypercall; ++ struct { ++ __u64 rip; ++ __u32 is_write; ++ __u32 pad; ++ } tpr_access; ++ struct { ++ __u8 icptcode; ++ __u16 ipa; ++ __u32 ipb; ++ } s390_sieic; ++ __u64 s390_reset_flags; ++ struct { ++ __u64 trans_exc_code; ++ __u32 pgm_code; ++ } s390_ucontrol; ++ struct { ++ __u32 dcrn; ++ __u32 data; ++ __u8 is_write; ++ } dcr; ++ struct { ++ __u32 suberror; ++ __u32 ndata; ++ __u64 data[16]; ++ } internal; ++ struct { ++ __u64 gprs[32]; ++ } osi; ++ struct { ++ __u64 nr; ++ __u64 ret; ++ __u64 args[9]; ++ } papr_hcall; ++ struct { ++ __u16 subchannel_id; ++ __u16 subchannel_nr; ++ __u32 io_int_parm; ++ __u32 io_int_word; ++ __u32 ipb; ++ __u8 dequeued; ++ } s390_tsch; ++ struct { ++ __u32 epr; ++ } epr; ++ struct { ++ __u32 type; ++ __u64 flags; ++ } system_event; ++ struct { ++ __u64 addr; ++ __u8 ar; ++ __u8 reserved; ++ __u8 fc; ++ __u8 sel1; ++ __u16 sel2; ++ } s390_stsi; ++ struct { ++ __u8 vector; ++ } eoi; ++ struct kvm_hyperv_exit hyperv; ++ char padding[256]; ++ }; ++ __u64 kvm_valid_regs; ++ __u64 kvm_dirty_regs; ++ union { ++ struct kvm_sync_regs regs; ++ char padding[2048]; ++ } s; ++}; ++ ++struct kvm_coalesced_mmio { ++ __u64 phys_addr; ++ __u32 len; ++ __u32 pad; ++ __u8 data[8]; ++}; ++ ++struct kvm_coalesced_mmio_ring { ++ __u32 first; ++ __u32 last; ++ struct kvm_coalesced_mmio coalesced_mmio[0]; ++}; ++ ++struct kvm_device_attr { ++ __u32 flags; ++ __u32 group; ++ __u64 attr; ++ __u64 addr; ++}; ++ ++struct id_reg_info { ++ __u64 sys_id; ++ __u64 sys_val; ++}; ++ ++struct id_registers { ++ struct id_reg_info regs[64]; ++ __u64 num; ++}; ++ ++typedef u64 gpa_t; ++ ++typedef u64 gfn_t; ++ ++struct kvm_memory_slot { ++ gfn_t base_gfn; ++ long unsigned int npages; ++ long unsigned int *dirty_bitmap; ++ struct kvm_arch_memory_slot arch; ++ long unsigned int userspace_addr; ++ u32 flags; ++ short int id; ++}; ++ ++struct kvm_decode { ++ long unsigned int rt; ++ bool sign_extend; ++}; ++ ++enum hisi_cpu_type { ++ HI_1612 = 0, ++ HI_1616 = 1, ++ HI_1620 = 2, ++ UNKNOWN_HI_TYPE = 3, ++}; ++ ++struct kvm_vcpu; ++ ++struct 
kvm_io_device; ++ ++struct kvm_io_device_ops { ++ int (*read)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, void *); ++ int (*write)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, const void *); ++ void (*destructor)(struct kvm_io_device *); ++}; ++ ++struct kvm_vcpu_stat { ++ u64 pid; ++ u64 halt_successful_poll; ++ u64 halt_attempted_poll; ++ u64 halt_poll_invalid; ++ u64 halt_wakeup; ++ u64 hvc_exit_stat; ++ u64 wfe_exit_stat; ++ u64 wfi_exit_stat; ++ u64 mmio_exit_user; ++ u64 mmio_exit_kernel; ++ u64 exits; ++ u64 fp_asimd_exit_stat; ++ u64 irq_exit_stat; ++ u64 sys64_exit_stat; ++ u64 mabt_exit_stat; ++ u64 fail_entry_exit_stat; ++ u64 internal_error_exit_stat; ++ u64 unknown_ec_exit_stat; ++ u64 cp15_32_exit_stat; ++ u64 cp15_64_exit_stat; ++ u64 cp14_mr_exit_stat; ++ u64 cp14_ls_exit_stat; ++ u64 cp14_64_exit_stat; ++ u64 smc_exit_stat; ++ u64 sve_exit_stat; ++ u64 debug_exit_stat; ++ u64 steal; ++ u64 st_max; ++ u64 utime; ++ u64 stime; ++ u64 gtime; ++}; ++ ++struct kvm_mmio_fragment { ++ gpa_t gpa; ++ void *data; ++ unsigned int len; ++}; ++ ++struct kvm_cpu_context { ++ struct kvm_regs gp_regs; ++ union { ++ u64 sys_regs[100]; ++ u32 copro[200]; ++ }; ++ struct kvm_vcpu *__hyp_running_vcpu; ++ long: 64; ++}; ++ ++struct kvm_vcpu_fault_info { ++ u32 esr_el2; ++ u64 far_el2; ++ u64 hpfar_el2; ++ u64 disr_el1; ++}; ++ ++struct vgic_v2_cpu_if { ++ u32 vgic_hcr; ++ u32 vgic_vmcr; ++ u32 vgic_apr; ++ u32 vgic_lr[64]; ++}; ++ ++struct its_vm; ++ ++struct its_vpe { ++ struct page *vpt_page; ++ struct its_vm *its_vm; ++ int irq; ++ irq_hw_number_t vpe_db_lpi; ++ int vpe_proxy_event; ++ u16 col_idx; ++ u16 vpe_id; ++ bool idai; ++ bool pending_last; ++}; ++ ++struct vgic_v3_cpu_if { ++ u32 vgic_hcr; ++ u32 vgic_vmcr; ++ u32 vgic_sre; ++ u32 vgic_ap0r[4]; ++ u32 vgic_ap1r[4]; ++ u64 vgic_lr[16]; ++ struct its_vpe its_vpe; ++}; ++ ++enum vgic_irq_config { ++ VGIC_CONFIG_EDGE = 0, ++ VGIC_CONFIG_LEVEL = 1, ++}; ++ ++struct vgic_irq { ++ raw_spinlock_t irq_lock; ++ struct list_head lpi_list; ++ struct list_head ap_list; ++ struct kvm_vcpu *vcpu; ++ struct kvm_vcpu *target_vcpu; ++ u32 intid; ++ bool line_level; ++ bool pending_latch; ++ bool active; ++ bool enabled; ++ bool hw; ++ struct kref refcount; ++ u32 hwintid; ++ unsigned int host_irq; ++ union { ++ u8 targets; ++ u32 mpidr; ++ }; ++ u8 source; ++ u8 active_source; ++ u8 priority; ++ u8 group; ++ enum vgic_irq_config config; ++ bool (*get_input_level)(int); ++ void *owner; ++}; ++ ++enum iodev_type { ++ IODEV_CPUIF = 0, ++ IODEV_DIST = 1, ++ IODEV_REDIST = 2, ++ IODEV_ITS = 3, ++}; ++ ++struct kvm_io_device { ++ const struct kvm_io_device_ops *ops; ++}; ++ ++struct vgic_its; ++ ++struct vgic_register_region; ++ ++struct vgic_io_device { ++ gpa_t base_addr; ++ union { ++ struct kvm_vcpu *redist_vcpu; ++ struct vgic_its *its; ++ }; ++ const struct vgic_register_region *regions; ++ enum iodev_type iodev_type; ++ int nr_regions; ++ struct kvm_io_device dev; ++}; ++ ++struct vgic_redist_region; ++ ++struct vgic_cpu { ++ union { ++ struct vgic_v2_cpu_if vgic_v2; ++ struct vgic_v3_cpu_if vgic_v3; ++ }; ++ unsigned int used_lrs; ++ struct vgic_irq private_irqs[32]; ++ raw_spinlock_t ap_list_lock; ++ struct list_head ap_list_head; ++ struct vgic_io_device rd_iodev; ++ struct vgic_redist_region *rdreg; ++ u64 pendbaser; ++ bool lpis_enabled; ++ u32 num_pri_bits; ++ u32 num_id_bits; ++}; ++ ++struct arch_timer_context { ++ u32 cnt_ctl; ++ u64 cnt_cval; ++ struct kvm_irq_level irq; ++ bool loaded; ++ u64 cntvoff; ++}; ++ 
++struct arch_timer_cpu { ++ struct arch_timer_context vtimer; ++ struct arch_timer_context ptimer; ++ struct hrtimer bg_timer; ++ struct hrtimer phys_timer; ++ bool enabled; ++}; ++ ++struct kvm_pmc { ++ u8 idx; ++ struct perf_event *perf_event; ++ u64 bitmask; ++}; ++ ++struct kvm_pmu { ++ int irq_num; ++ struct kvm_pmc pmc[32]; ++ bool ready; ++ bool created; ++ bool irq_level; ++ struct irq_work overflow_work; ++}; ++ ++struct kvm_mmu_memory_cache { ++ int nobjs; ++ void *objects[40]; ++}; ++ ++struct vcpu_reset_state { ++ long unsigned int pc; ++ long unsigned int r0; ++ bool be; ++ bool reset; ++}; ++ ++struct kvm_vcpu_arch { ++ struct kvm_cpu_context ctxt; ++ u64 hcr_el2; ++ u32 mdcr_el2; ++ struct kvm_vcpu_fault_info fault; ++ u64 workaround_flags; ++ u64 flags; ++ struct kvm_guest_debug_arch *debug_ptr; ++ struct kvm_guest_debug_arch vcpu_debug_state; ++ struct kvm_guest_debug_arch external_debug_state; ++ struct kvm_cpu_context *host_cpu_context; ++ struct thread_info *host_thread_info; ++ struct user_fpsimd_state *host_fpsimd_state; ++ struct { ++ struct kvm_guest_debug_arch regs; ++ u64 pmscr_el1; ++ } host_debug_state; ++ struct vgic_cpu vgic_cpu; ++ struct arch_timer_cpu timer_cpu; ++ struct kvm_pmu pmu; ++ struct { ++ u32 mdscr_el1; ++ } guest_debug_preserved; ++ bool power_off; ++ bool pause; ++ struct kvm_decode mmio_decode; ++ struct kvm_mmu_memory_cache mmu_page_cache; ++ int target; ++ long unsigned int features[1]; ++ bool has_run_once; ++ u64 vsesr_el2; ++ struct vcpu_reset_state reset_state; ++ bool sysregs_loaded_on_cpu; ++ struct id_registers idregs; ++ long: 64; ++}; ++ ++struct kvm; ++ ++struct kvm_vcpu { ++ struct kvm *kvm; ++ struct preempt_notifier preempt_notifier; ++ int cpu; ++ int vcpu_id; ++ int srcu_idx; ++ int mode; ++ u64 requests; ++ long unsigned int guest_debug; ++ int pre_pcpu; ++ struct list_head blocked_vcpu_list; ++ struct mutex mutex; ++ struct kvm_run *run; ++ int guest_xcr0_loaded; ++ struct swait_queue_head wq; ++ struct pid *pid; ++ int sigset_active; ++ sigset_t sigset; ++ struct kvm_vcpu_stat stat; ++ unsigned int halt_poll_ns; ++ bool valid_wakeup; ++ int mmio_needed; ++ int mmio_read_completed; ++ int mmio_is_write; ++ int mmio_cur_fragment; ++ int mmio_nr_fragments; ++ struct kvm_mmio_fragment mmio_fragments[2]; ++ struct { ++ bool in_spin_loop; ++ bool dy_eligible; ++ } spin_loop; ++ bool preempted; ++ long: 40; ++ long: 64; ++ struct kvm_vcpu_arch arch; ++ struct dentry *debugfs_dentry; ++ long: 64; ++}; ++ ++struct its_vm { ++ struct fwnode_handle *fwnode; ++ struct irq_domain *domain; ++ struct page *vprop_page; ++ struct its_vpe **vpes; ++ int nr_vpes; ++ irq_hw_number_t db_lpi_base; ++ long unsigned int *db_bitmap; ++ int nr_db_lpis; ++ u32 vlpi_count[16]; ++}; ++ ++enum vgic_type { ++ VGIC_V2 = 0, ++ VGIC_V3 = 1, ++}; ++ ++struct vgic_global { ++ enum vgic_type type; ++ phys_addr_t vcpu_base; ++ void *vcpu_base_va; ++ void *vcpu_hyp_va; ++ void *vctrl_base; ++ void *vctrl_hyp; ++ int nr_lr; ++ unsigned int maint_irq; ++ int max_gic_vcpus; ++ bool can_emulate_gicv2; ++ bool has_gicv4; ++ struct static_key_false gicv3_cpuif; ++ u32 ich_vtr_el2; ++}; ++ ++struct kvm_device; ++ ++struct vgic_its { ++ gpa_t vgic_its_base; ++ bool enabled; ++ struct vgic_io_device iodev; ++ struct kvm_device *dev; ++ u64 baser_device_table; ++ u64 baser_coll_table; ++ struct mutex cmd_lock; ++ u64 cbaser; ++ u32 creadr; ++ u32 cwriter; ++ u32 abi_rev; ++ struct mutex its_lock; ++ struct list_head device_list; ++ struct list_head collection_list; ++}; 
++ ++struct vgic_register_region { ++ unsigned int reg_offset; ++ unsigned int len; ++ unsigned int bits_per_irq; ++ unsigned int access_flags; ++ union { ++ long unsigned int (*read)(struct kvm_vcpu *, gpa_t, unsigned int); ++ long unsigned int (*its_read)(struct kvm *, struct vgic_its *, gpa_t, unsigned int); ++ }; ++ union { ++ void (*write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); ++ void (*its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); ++ }; ++ long unsigned int (*uaccess_read)(struct kvm_vcpu *, gpa_t, unsigned int); ++ union { ++ int (*uaccess_write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); ++ int (*uaccess_its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); ++ }; ++}; ++ ++struct its_trans_cache { ++ struct list_head lpi_cache; ++ raw_spinlock_t lpi_cache_lock; ++}; ++ ++struct kvm_device_ops; ++ ++struct kvm_device { ++ struct kvm_device_ops *ops; ++ struct kvm *kvm; ++ void *private; ++ struct list_head vm_node; ++}; ++ ++struct vgic_redist_region { ++ u32 index; ++ gpa_t base; ++ u32 count; ++ u32 free_index; ++ struct list_head list; ++}; ++ ++struct vgic_state_iter; ++ ++struct vgic_dist { ++ bool in_kernel; ++ bool ready; ++ bool initialized; ++ u32 vgic_model; ++ u32 implementation_rev; ++ bool v2_groups_user_writable; ++ bool msis_require_devid; ++ int nr_spis; ++ gpa_t vgic_dist_base; ++ union { ++ gpa_t vgic_cpu_base; ++ struct list_head rd_regions; ++ }; ++ bool enabled; ++ struct vgic_irq *spis; ++ struct vgic_io_device dist_iodev; ++ bool has_its; ++ u64 propbaser; ++ raw_spinlock_t lpi_list_lock; ++ struct list_head lpi_list_head; ++ int lpi_list_count; ++ struct its_trans_cache lpi_translation_cache[8]; ++ struct vgic_state_iter *iter; ++ struct its_vm its_vm; ++}; ++ ++struct perf_event_mmap_page { ++ __u32 version; ++ __u32 compat_version; ++ __u32 lock; ++ __u32 index; ++ __s64 offset; ++ __u64 time_enabled; ++ __u64 time_running; ++ union { ++ __u64 capabilities; ++ struct { ++ __u64 cap_bit0: 1; ++ __u64 cap_bit0_is_deprecated: 1; ++ __u64 cap_user_rdpmc: 1; ++ __u64 cap_user_time: 1; ++ __u64 cap_user_time_zero: 1; ++ __u64 cap_____res: 59; ++ }; ++ }; ++ __u16 pmc_width; ++ __u16 time_shift; ++ __u32 time_mult; ++ __u64 time_offset; ++ __u64 time_zero; ++ __u32 size; ++ __u8 __reserved[948]; ++ __u64 data_head; ++ __u64 data_tail; ++ __u64 data_offset; ++ __u64 data_size; ++ __u64 aux_head; ++ __u64 aux_tail; ++ __u64 aux_offset; ++ __u64 aux_size; ++}; ++ ++struct perf_pmu_events_attr { ++ struct device_attribute attr; ++ u64 id; ++ const char *event_str; ++}; ++ ++struct kvm_arch { ++ u64 vmid_gen; ++ u32 vmid; ++ pgd_t *pgd; ++ u64 vttbr; ++ u64 vtcr; ++ int *last_vcpu_ran; ++ int max_vcpus; ++ struct vgic_dist vgic; ++ u32 psci_version; ++}; ++ ++enum vcpu_sysreg { ++ __INVALID_SYSREG__ = 0, ++ MPIDR_EL1 = 1, ++ CSSELR_EL1 = 2, ++ SCTLR_EL1 = 3, ++ ACTLR_EL1 = 4, ++ CPACR_EL1 = 5, ++ TTBR0_EL1 = 6, ++ TTBR1_EL1 = 7, ++ TCR_EL1 = 8, ++ ESR_EL1 = 9, ++ AFSR0_EL1 = 10, ++ AFSR1_EL1 = 11, ++ FAR_EL1 = 12, ++ MAIR_EL1 = 13, ++ VBAR_EL1 = 14, ++ CONTEXTIDR_EL1 = 15, ++ TPIDR_EL0 = 16, ++ TPIDRRO_EL0 = 17, ++ TPIDR_EL1 = 18, ++ AMAIR_EL1 = 19, ++ CNTKCTL_EL1 = 20, ++ PAR_EL1 = 21, ++ MDSCR_EL1 = 22, ++ MDCCINT_EL1 = 23, ++ DISR_EL1 = 24, ++ PMCR_EL0 = 25, ++ PMSELR_EL0 = 26, ++ PMEVCNTR0_EL0 = 27, ++ PMEVCNTR30_EL0 = 57, ++ PMCCNTR_EL0 = 58, ++ PMEVTYPER0_EL0 = 59, ++ PMEVTYPER30_EL0 = 89, ++ PMCCFILTR_EL0 = 90, ++ PMCNTENSET_EL0 = 91, ++ PMINTENSET_EL1 = 
92, ++ PMOVSSET_EL0 = 93, ++ PMSWINC_EL0 = 94, ++ PMUSERENR_EL0 = 95, ++ DACR32_EL2 = 96, ++ IFSR32_EL2 = 97, ++ FPEXC32_EL2 = 98, ++ DBGVCR32_EL2 = 99, ++ NR_SYS_REGS = 100, ++}; ++ ++struct kvm_pmu_events { ++ u32 events_host; ++ u32 events_guest; ++}; ++ ++struct kvm_host_data { ++ struct kvm_cpu_context host_ctxt; ++ struct kvm_pmu_events pmu_events; ++ long: 64; ++}; ++ ++typedef struct kvm_host_data kvm_host_data_t; ++ ++struct kvm_vm_stat { ++ ulong remote_tlb_flush; ++}; ++ ++struct kvm_io_range { ++ gpa_t addr; ++ int len; ++ struct kvm_io_device *dev; ++}; ++ ++struct kvm_io_bus { ++ int dev_count; ++ int ioeventfd_count; ++ struct kvm_io_range range[0]; ++}; ++ ++enum kvm_bus { ++ KVM_MMIO_BUS = 0, ++ KVM_PIO_BUS = 1, ++ KVM_VIRTIO_CCW_NOTIFY_BUS = 2, ++ KVM_FAST_MMIO_BUS = 3, ++ KVM_NR_BUSES = 4, ++}; ++ ++struct kvm_memslots; ++ ++struct kvm_irq_routing_table; ++ ++struct kvm_stat_data; ++ ++struct kvm { ++ spinlock_t mmu_lock; ++ struct mutex slots_lock; ++ struct mm_struct *mm; ++ struct kvm_memslots *memslots[1]; ++ struct kvm_vcpu *vcpus[512]; ++ atomic_t online_vcpus; ++ int created_vcpus; ++ int last_boosted_vcpu; ++ struct list_head vm_list; ++ struct mutex lock; ++ struct kvm_io_bus *buses[4]; ++ struct { ++ spinlock_t lock; ++ struct list_head items; ++ struct list_head resampler_list; ++ struct mutex resampler_lock; ++ } irqfds; ++ struct list_head ioeventfds; ++ struct kvm_vm_stat stat; ++ struct kvm_arch arch; ++ refcount_t users_count; ++ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; ++ spinlock_t ring_lock; ++ struct list_head coalesced_zones; ++ struct mutex irq_lock; ++ struct kvm_irq_routing_table *irq_routing; ++ struct hlist_head irq_ack_notifier_list; ++ struct mmu_notifier mmu_notifier; ++ long unsigned int mmu_notifier_seq; ++ long int mmu_notifier_count; ++ long int tlbs_dirty; ++ struct list_head devices; ++ struct dentry *debugfs_dentry; ++ struct kvm_stat_data **debugfs_stat_data; ++ struct srcu_struct srcu; ++ struct srcu_struct irq_srcu; ++ pid_t userspace_pid; ++}; ++ ++struct kvm_irq_routing_table { ++ int chip[988]; ++ u32 nr_rt_entries; ++ struct hlist_head map[0]; ++}; ++ ++struct kvm_memslots { ++ u64 generation; ++ struct kvm_memory_slot memslots[512]; ++ short int id_to_index[512]; ++ atomic_t lru_slot; ++ int used_slots; ++}; ++ ++struct kvm_stat_data { ++ int offset; ++ int mode; ++ struct kvm *kvm; ++}; ++ ++enum kvm_stat_kind { ++ KVM_STAT_VM = 0, ++ KVM_STAT_VCPU = 1, ++ KVM_STAT_DFX = 2, ++}; ++ ++struct kvm_stats_debugfs_item { ++ const char *name; ++ int offset; ++ enum kvm_stat_kind kind; ++ int mode; ++}; ++ ++enum dfx_stat_kind { ++ DFX_STAT_U64 = 0, ++ DFX_STAT_CPUTIME = 1, ++}; ++ ++struct dfx_kvm_stats_debugfs_item { ++ const char *name; ++ int offset; ++ enum dfx_stat_kind dfx_kind; ++ struct dentry *dentry; ++}; ++ ++struct kvm_device_ops { ++ const char *name; ++ int (*create)(struct kvm_device *, u32); ++ void (*init)(struct kvm_device *); ++ void (*destroy)(struct kvm_device *); ++ int (*set_attr)(struct kvm_device *, struct kvm_device_attr *); ++ int (*get_attr)(struct kvm_device *, struct kvm_device_attr *); ++ int (*has_attr)(struct kvm_device *, struct kvm_device_attr *); ++ long int (*ioctl)(struct kvm_device *, unsigned int, long unsigned int); ++}; ++ ++struct mfd_cell; ++ ++struct platform_device { ++ const char *name; ++ int id; ++ bool id_auto; ++ struct device dev; ++ u32 num_resources; ++ struct resource *resource; ++ const struct platform_device_id *id_entry; ++ char *driver_override; ++ struct mfd_cell 
*mfd_cell; ++ struct pdev_archdata archdata; ++}; ++ ++struct platform_driver { ++ int (*probe)(struct platform_device *); ++ int (*remove)(struct platform_device *); ++ void (*shutdown)(struct platform_device *); ++ int (*suspend)(struct platform_device *, pm_message_t); ++ int (*resume)(struct platform_device *); ++ struct device_driver driver; ++ const struct platform_device_id *id_table; ++ bool prevent_deferred_probe; ++}; ++ ++struct arm_pmu; ++ ++struct pmu_hw_events { ++ struct perf_event *events[32]; ++ long unsigned int used_mask[1]; ++ raw_spinlock_t pmu_lock; ++ struct arm_pmu *percpu_pmu; ++ int irq; ++}; ++ ++struct arm_pmu { ++ struct pmu pmu; ++ cpumask_t supported_cpus; ++ char *name; ++ irqreturn_t (*handle_irq)(struct arm_pmu *); ++ void (*enable)(struct perf_event *); ++ void (*disable)(struct perf_event *); ++ int (*get_event_idx)(struct pmu_hw_events *, struct perf_event *); ++ void (*clear_event_idx)(struct pmu_hw_events *, struct perf_event *); ++ int (*set_event_filter)(struct hw_perf_event *, struct perf_event_attr *); ++ u64 (*read_counter)(struct perf_event *); ++ void (*write_counter)(struct perf_event *, u64); ++ void (*start)(struct arm_pmu *); ++ void (*stop)(struct arm_pmu *); ++ void (*reset)(void *); ++ int (*map_event)(struct perf_event *); ++ int (*filter_match)(struct perf_event *); ++ int num_events; ++ bool secure_access; ++ long unsigned int pmceid_bitmap[1]; ++ struct platform_device *plat_device; ++ struct pmu_hw_events *hw_events; ++ struct hlist_node node; ++ struct notifier_block cpu_pm_nb; ++ const struct attribute_group *attr_groups[4]; ++ long unsigned int acpi_cpuid; ++}; ++ ++struct armv8pmu_probe_info { ++ struct arm_pmu *pmu; ++ bool present; ++}; ++ ++enum hw_breakpoint_ops { ++ HW_BREAKPOINT_INSTALL = 0, ++ HW_BREAKPOINT_UNINSTALL = 1, ++ HW_BREAKPOINT_RESTORE = 2, ++}; ++ ++struct cpu_suspend_ctx { ++ u64 ctx_regs[12]; ++ u64 sp; ++ long: 64; ++}; ++ ++struct sleep_stack_data { ++ struct cpu_suspend_ctx system_regs; ++ long unsigned int callee_saved_regs[12]; ++}; ++ ++typedef void *acpi_handle; ++ ++typedef u64 phys_cpuid_t; ++ ++struct cpuidle_state_usage { ++ long long unsigned int disable; ++ long long unsigned int usage; ++ long long unsigned int time; ++ long long unsigned int s2idle_usage; ++ long long unsigned int s2idle_time; ++}; ++ ++struct cpuidle_device; ++ ++struct cpuidle_driver; ++ ++struct cpuidle_state { ++ char name[16]; ++ char desc[32]; ++ unsigned int flags; ++ unsigned int exit_latency; ++ int power_usage; ++ unsigned int target_residency; ++ bool disabled; ++ int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); ++ int (*enter_dead)(struct cpuidle_device *, int); ++ void (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); ++}; ++ ++struct cpuidle_driver_kobj; ++ ++struct cpuidle_state_kobj; ++ ++struct cpuidle_device_kobj; ++ ++struct cpuidle_device { ++ unsigned int registered: 1; ++ unsigned int enabled: 1; ++ unsigned int use_deepest_state: 1; ++ unsigned int poll_time_limit: 1; ++ unsigned int cpu; ++ int last_residency; ++ struct cpuidle_state_usage states_usage[10]; ++ struct cpuidle_state_kobj *kobjs[10]; ++ struct cpuidle_driver_kobj *kobj_driver; ++ struct cpuidle_device_kobj *kobj_dev; ++ struct list_head device_list; ++}; ++ ++struct cpuidle_driver { ++ const char *name; ++ struct module *owner; ++ int refcnt; ++ unsigned int bctimer: 1; ++ struct cpuidle_state states[10]; ++ int state_count; ++ int safe_state_index; ++ struct cpumask *cpumask; ++}; ++ ++enum 
thermal_device_mode { ++ THERMAL_DEVICE_DISABLED = 0, ++ THERMAL_DEVICE_ENABLED = 1, ++}; ++ ++enum thermal_trip_type { ++ THERMAL_TRIP_ACTIVE = 0, ++ THERMAL_TRIP_PASSIVE = 1, ++ THERMAL_TRIP_HOT = 2, ++ THERMAL_TRIP_CRITICAL = 3, ++}; ++ ++enum thermal_trend { ++ THERMAL_TREND_STABLE = 0, ++ THERMAL_TREND_RAISING = 1, ++ THERMAL_TREND_DROPPING = 2, ++ THERMAL_TREND_RAISE_FULL = 3, ++ THERMAL_TREND_DROP_FULL = 4, ++}; ++ ++enum thermal_notify_event { ++ THERMAL_EVENT_UNSPECIFIED = 0, ++ THERMAL_EVENT_TEMP_SAMPLE = 1, ++ THERMAL_TRIP_VIOLATED = 2, ++ THERMAL_TRIP_CHANGED = 3, ++ THERMAL_DEVICE_DOWN = 4, ++ THERMAL_DEVICE_UP = 5, ++ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, ++ THERMAL_TABLE_CHANGED = 7, ++}; ++ ++struct thermal_zone_device; ++ ++struct thermal_cooling_device; ++ ++struct thermal_zone_device_ops { ++ int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *); ++ int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *); ++ int (*get_temp)(struct thermal_zone_device *, int *); ++ int (*set_trips)(struct thermal_zone_device *, int, int); ++ int (*get_mode)(struct thermal_zone_device *, enum thermal_device_mode *); ++ int (*set_mode)(struct thermal_zone_device *, enum thermal_device_mode); ++ int (*get_trip_type)(struct thermal_zone_device *, int, enum thermal_trip_type *); ++ int (*get_trip_temp)(struct thermal_zone_device *, int, int *); ++ int (*set_trip_temp)(struct thermal_zone_device *, int, int); ++ int (*get_trip_hyst)(struct thermal_zone_device *, int, int *); ++ int (*set_trip_hyst)(struct thermal_zone_device *, int, int); ++ int (*get_crit_temp)(struct thermal_zone_device *, int *); ++ int (*set_emul_temp)(struct thermal_zone_device *, int); ++ int (*get_trend)(struct thermal_zone_device *, int, enum thermal_trend *); ++ int (*notify)(struct thermal_zone_device *, int, enum thermal_trip_type); ++}; ++ ++struct thermal_attr; ++ ++struct thermal_zone_params; ++ ++struct thermal_governor; ++ ++struct thermal_zone_device { ++ int id; ++ char type[20]; ++ struct device device; ++ struct attribute_group trips_attribute_group; ++ struct thermal_attr *trip_temp_attrs; ++ struct thermal_attr *trip_type_attrs; ++ struct thermal_attr *trip_hyst_attrs; ++ void *devdata; ++ int trips; ++ long unsigned int trips_disabled; ++ int passive_delay; ++ int polling_delay; ++ int temperature; ++ int last_temperature; ++ int emul_temperature; ++ int passive; ++ int prev_low_trip; ++ int prev_high_trip; ++ unsigned int forced_passive; ++ atomic_t need_update; ++ struct thermal_zone_device_ops *ops; ++ struct thermal_zone_params *tzp; ++ struct thermal_governor *governor; ++ void *governor_data; ++ struct list_head thermal_instances; ++ struct ida ida; ++ struct mutex lock; ++ struct list_head node; ++ struct delayed_work poll_queue; ++ enum thermal_notify_event notify_event; ++}; ++ ++struct thermal_cooling_device_ops; ++ ++struct thermal_cooling_device { ++ int id; ++ char type[20]; ++ struct device device; ++ struct device_node *np; ++ void *devdata; ++ void *stats; ++ const struct thermal_cooling_device_ops *ops; ++ bool updated; ++ struct mutex lock; ++ struct list_head thermal_instances; ++ struct list_head node; ++}; ++ ++struct thermal_cooling_device_ops { ++ int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); ++ int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); ++ int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); ++ int (*get_requested_power)(struct thermal_cooling_device 
*, struct thermal_zone_device *, u32 *); ++ int (*state2power)(struct thermal_cooling_device *, struct thermal_zone_device *, long unsigned int, u32 *); ++ int (*power2state)(struct thermal_cooling_device *, struct thermal_zone_device *, u32, long unsigned int *); ++}; ++ ++struct thermal_attr { ++ struct device_attribute attr; ++ char name[20]; ++}; ++ ++struct thermal_bind_params; ++ ++struct thermal_zone_params { ++ char governor_name[20]; ++ bool no_hwmon; ++ int num_tbps; ++ struct thermal_bind_params *tbp; ++ u32 sustainable_power; ++ s32 k_po; ++ s32 k_pu; ++ s32 k_i; ++ s32 k_d; ++ s32 integral_cutoff; ++ int slope; ++ int offset; ++}; ++ ++struct thermal_governor { ++ char name[20]; ++ int (*bind_to_tz)(struct thermal_zone_device *); ++ void (*unbind_from_tz)(struct thermal_zone_device *); ++ int (*throttle)(struct thermal_zone_device *, int); ++ struct list_head governor_list; ++}; ++ ++struct thermal_bind_params { ++ struct thermal_cooling_device *cdev; ++ int weight; ++ int trip_mask; ++ long unsigned int *binding_limits; ++ int (*match)(struct thermal_zone_device *, struct thermal_cooling_device *); ++}; ++ ++struct acpi_processor_cx { ++ u8 valid; ++ u8 type; ++ u32 address; ++ u8 entry_method; ++ u8 index; ++ u32 latency; ++ u8 bm_sts_skip; ++ char desc[32]; ++}; ++ ++struct acpi_lpi_state { ++ u32 min_residency; ++ u32 wake_latency; ++ u32 flags; ++ u32 arch_flags; ++ u32 res_cnt_freq; ++ u32 enable_parent_state; ++ u64 address; ++ u8 index; ++ u8 entry_method; ++ char desc[32]; ++}; ++ ++struct acpi_processor_power { ++ int count; ++ union { ++ struct acpi_processor_cx states[8]; ++ struct acpi_lpi_state lpi_states[8]; ++ }; ++ int timer_broadcast_on_state; ++}; ++ ++struct acpi_psd_package { ++ u64 num_entries; ++ u64 revision; ++ u64 domain; ++ u64 coord_type; ++ u64 num_processors; ++}; ++ ++struct acpi_pct_register { ++ u8 descriptor; ++ u16 length; ++ u8 space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 reserved; ++ u64 address; ++} __attribute__((packed)); ++ ++struct acpi_processor_px { ++ u64 core_frequency; ++ u64 power; ++ u64 transition_latency; ++ u64 bus_master_latency; ++ u64 control; ++ u64 status; ++}; ++ ++struct acpi_processor_performance { ++ unsigned int state; ++ unsigned int platform_limit; ++ struct acpi_pct_register control_register; ++ struct acpi_pct_register status_register; ++ short: 16; ++ unsigned int state_count; ++ int: 32; ++ struct acpi_processor_px *states; ++ struct acpi_psd_package domain_info; ++ cpumask_var_t shared_cpu_map; ++ unsigned int shared_type; ++ int: 32; ++} __attribute__((packed)); ++ ++struct acpi_tsd_package { ++ u64 num_entries; ++ u64 revision; ++ u64 domain; ++ u64 coord_type; ++ u64 num_processors; ++}; ++ ++struct acpi_processor_tx_tss { ++ u64 freqpercentage; ++ u64 power; ++ u64 transition_latency; ++ u64 control; ++ u64 status; ++}; ++ ++struct acpi_processor_tx { ++ u16 power; ++ u16 performance; ++}; ++ ++struct acpi_processor; ++ ++struct acpi_processor_throttling { ++ unsigned int state; ++ unsigned int platform_limit; ++ struct acpi_pct_register control_register; ++ struct acpi_pct_register status_register; ++ short: 16; ++ unsigned int state_count; ++ int: 32; ++ struct acpi_processor_tx_tss *states_tss; ++ struct acpi_tsd_package domain_info; ++ cpumask_var_t shared_cpu_map; ++ int (*acpi_processor_get_throttling)(struct acpi_processor *); ++ int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool); ++ u32 address; ++ u8 duty_offset; ++ u8 duty_width; ++ u8 tsd_valid_flag; ++ char: 8; ++ 
unsigned int shared_type; ++ struct acpi_processor_tx states[16]; ++ int: 32; ++} __attribute__((packed)); ++ ++struct acpi_processor_flags { ++ u8 power: 1; ++ u8 performance: 1; ++ u8 throttling: 1; ++ u8 limit: 1; ++ u8 bm_control: 1; ++ u8 bm_check: 1; ++ u8 has_cst: 1; ++ u8 has_lpi: 1; ++ u8 power_setup_done: 1; ++ u8 bm_rld_set: 1; ++ u8 need_hotplug_init: 1; ++}; ++ ++struct acpi_processor_lx { ++ int px; ++ int tx; ++}; ++ ++struct acpi_processor_limit { ++ struct acpi_processor_lx state; ++ struct acpi_processor_lx thermal; ++ struct acpi_processor_lx user; ++}; ++ ++struct acpi_processor { ++ acpi_handle handle; ++ u32 acpi_id; ++ phys_cpuid_t phys_id; ++ u32 id; ++ u32 pblk; ++ int performance_platform_limit; ++ int throttling_platform_limit; ++ struct acpi_processor_flags flags; ++ struct acpi_processor_power power; ++ struct acpi_processor_performance *performance; ++ struct acpi_processor_throttling throttling; ++ struct acpi_processor_limit limit; ++ struct thermal_cooling_device *cdev; ++ struct device *dev; ++}; ++ ++struct acpi_processor_errata { ++ u8 smp; ++ struct { ++ u8 throttle: 1; ++ u8 fdma: 1; ++ u8 reserved: 6; ++ u32 bmisx; ++ } piix4; ++}; ++ ++struct klp_func { ++ const char *old_name; ++ void *new_func; ++ long unsigned int old_sympos; ++ int force; ++ long unsigned int old_addr; ++ struct kobject kobj; ++ struct list_head stack_node; ++ long unsigned int old_size; ++ long unsigned int new_size; ++ bool patched; ++}; ++ ++struct klp_hook { ++ void (*hook)(); ++}; ++ ++struct klp_object; ++ ++struct klp_callbacks { ++ int (*pre_patch)(struct klp_object *); ++ void (*post_patch)(struct klp_object *); ++ void (*pre_unpatch)(struct klp_object *); ++ void (*post_unpatch)(struct klp_object *); ++ bool post_unpatch_enabled; ++}; ++ ++struct klp_object { ++ const char *name; ++ struct klp_func *funcs; ++ struct klp_hook *hooks_load; ++ struct klp_hook *hooks_unload; ++ struct klp_callbacks callbacks; ++ struct kobject kobj; ++ struct module *mod; ++ bool patched; ++}; ++ ++struct klp_patch { ++ struct module *mod; ++ struct klp_object *objs; ++ struct list_head list; ++ struct kobject kobj; ++ bool enabled; ++ struct completion finish; ++}; ++ ++struct klp_func_node { ++ struct list_head node; ++ struct list_head func_stack; ++ long unsigned int old_addr; ++ u32 old_insns[4]; ++}; ++ ++struct walk_stackframe_args { ++ struct klp_patch *patch; ++ int enable; ++ int ret; ++}; ++ ++enum jump_label_type { ++ JUMP_LABEL_NOP = 0, ++ JUMP_LABEL_JMP = 1, ++}; ++ ++struct die_args { ++ struct pt_regs *regs; ++ const char *str; ++ long int err; ++ int trapnr; ++ int signr; ++}; ++ ++enum kgdb_bpstate { ++ BP_UNDEFINED = 0, ++ BP_REMOVED = 1, ++ BP_SET = 2, ++ BP_ACTIVE = 3, ++}; ++ ++struct kgdb_bkpt { ++ long unsigned int bpt_addr; ++ unsigned char saved_instr[4]; ++ enum kgdb_bptype type; ++ enum kgdb_bpstate state; ++}; ++ ++struct resource_entry { ++ struct list_head node; ++ struct resource *res; ++ resource_size_t offset; ++ struct resource __res; ++}; ++ ++typedef u64 acpi_io_address; ++ ++typedef u32 acpi_object_type; ++ ++union acpi_object { ++ acpi_object_type type; ++ struct { ++ acpi_object_type type; ++ u64 value; ++ } integer; ++ struct { ++ acpi_object_type type; ++ u32 length; ++ char *pointer; ++ } string; ++ struct { ++ acpi_object_type type; ++ u32 length; ++ u8 *pointer; ++ } buffer; ++ struct { ++ acpi_object_type type; ++ u32 count; ++ union acpi_object *elements; ++ } package; ++ struct { ++ acpi_object_type type; ++ acpi_object_type actual_type; ++ 
acpi_handle handle; ++ } reference; ++ struct { ++ acpi_object_type type; ++ u32 proc_id; ++ acpi_io_address pblk_address; ++ u32 pblk_length; ++ } processor; ++ struct { ++ acpi_object_type type; ++ u32 system_level; ++ u32 resource_order; ++ } power_resource; ++}; ++ ++struct pci_device_id { ++ __u32 vendor; ++ __u32 device; ++ __u32 subvendor; ++ __u32 subdevice; ++ __u32 class; ++ __u32 class_mask; ++ kernel_ulong_t driver_data; ++}; ++ ++struct acpi_device; ++ ++struct acpi_hotplug_profile { ++ struct kobject kobj; ++ int (*scan_dependent)(struct acpi_device *); ++ void (*notify_online)(struct acpi_device *); ++ bool enabled: 1; ++ bool demand_offline: 1; ++}; ++ ++struct acpi_device_status { ++ u32 present: 1; ++ u32 enabled: 1; ++ u32 show_in_ui: 1; ++ u32 functional: 1; ++ u32 battery_present: 1; ++ u32 reserved: 27; ++}; ++ ++struct acpi_device_flags { ++ u32 dynamic_status: 1; ++ u32 removable: 1; ++ u32 ejectable: 1; ++ u32 power_manageable: 1; ++ u32 match_driver: 1; ++ u32 initialized: 1; ++ u32 visited: 1; ++ u32 hotplug_notify: 1; ++ u32 is_dock_station: 1; ++ u32 of_compatible_ok: 1; ++ u32 coherent_dma: 1; ++ u32 cca_seen: 1; ++ u32 enumeration_by_parent: 1; ++ u32 reserved: 19; ++}; ++ ++typedef char acpi_bus_id[8]; ++ ++struct acpi_pnp_type { ++ u32 hardware_id: 1; ++ u32 bus_address: 1; ++ u32 platform_id: 1; ++ u32 reserved: 29; ++}; ++ ++typedef long unsigned int acpi_bus_address; ++ ++typedef char acpi_device_name[40]; ++ ++typedef char acpi_device_class[20]; ++ ++struct acpi_device_pnp { ++ acpi_bus_id bus_id; ++ struct acpi_pnp_type type; ++ acpi_bus_address bus_address; ++ char *unique_id; ++ struct list_head ids; ++ acpi_device_name device_name; ++ acpi_device_class device_class; ++ union acpi_object *str_obj; ++}; ++ ++struct acpi_device_power_flags { ++ u32 explicit_get: 1; ++ u32 power_resources: 1; ++ u32 inrush_current: 1; ++ u32 power_removed: 1; ++ u32 ignore_parent: 1; ++ u32 dsw_present: 1; ++ u32 reserved: 26; ++}; ++ ++struct acpi_device_power_state { ++ struct { ++ u8 valid: 1; ++ u8 explicit_set: 1; ++ u8 reserved: 6; ++ } flags; ++ int power; ++ int latency; ++ struct list_head resources; ++}; ++ ++struct acpi_device_power { ++ int state; ++ struct acpi_device_power_flags flags; ++ struct acpi_device_power_state states[5]; ++}; ++ ++struct acpi_device_wakeup_flags { ++ u8 valid: 1; ++ u8 notifier_present: 1; ++}; ++ ++struct acpi_device_wakeup_context { ++ void (*func)(struct acpi_device_wakeup_context *); ++ struct device *dev; ++}; ++ ++struct acpi_device_wakeup { ++ acpi_handle gpe_device; ++ u64 gpe_number; ++ u64 sleep_state; ++ struct list_head resources; ++ struct acpi_device_wakeup_flags flags; ++ struct acpi_device_wakeup_context context; ++ struct wakeup_source *ws; ++ int prepare_count; ++ int enable_count; ++}; ++ ++struct acpi_device_perf_flags { ++ u8 reserved: 8; ++}; ++ ++struct acpi_device_perf_state; ++ ++struct acpi_device_perf { ++ int state; ++ struct acpi_device_perf_flags flags; ++ int state_count; ++ struct acpi_device_perf_state *states; ++}; ++ ++struct acpi_device_dir { ++ struct proc_dir_entry *entry; ++}; ++ ++struct acpi_device_data { ++ const union acpi_object *pointer; ++ const union acpi_object *properties; ++ const union acpi_object *of_compatible; ++ struct list_head subnodes; ++}; ++ ++struct acpi_scan_handler; ++ ++struct acpi_hotplug_context; ++ ++struct acpi_driver; ++ ++struct acpi_gpio_mapping; ++ ++struct acpi_device { ++ int device_type; ++ acpi_handle handle; ++ struct fwnode_handle fwnode; ++ struct 
acpi_device *parent; ++ struct list_head children; ++ struct list_head node; ++ struct list_head wakeup_list; ++ struct list_head del_list; ++ struct acpi_device_status status; ++ struct acpi_device_flags flags; ++ struct acpi_device_pnp pnp; ++ struct acpi_device_power power; ++ struct acpi_device_wakeup wakeup; ++ struct acpi_device_perf performance; ++ struct acpi_device_dir dir; ++ struct acpi_device_data data; ++ struct acpi_scan_handler *handler; ++ struct acpi_hotplug_context *hp; ++ struct acpi_driver *driver; ++ const struct acpi_gpio_mapping *driver_gpios; ++ void *driver_data; ++ struct device dev; ++ unsigned int physical_node_count; ++ unsigned int dep_unmet; ++ struct list_head physical_node_list; ++ struct mutex physical_node_lock; ++ void (*remove)(struct acpi_device *); ++}; ++ ++struct acpi_scan_handler { ++ const struct acpi_device_id *ids; ++ struct list_head list_node; ++ bool (*match)(const char *, const struct acpi_device_id **); ++ int (*attach)(struct acpi_device *, const struct acpi_device_id *); ++ void (*detach)(struct acpi_device *); ++ void (*bind)(struct device *); ++ void (*unbind)(struct device *); ++ struct acpi_hotplug_profile hotplug; ++}; ++ ++struct acpi_hotplug_context { ++ struct acpi_device *self; ++ int (*notify)(struct acpi_device *, u32); ++ void (*uevent)(struct acpi_device *, u32); ++ void (*fixup)(struct acpi_device *); ++}; ++ ++typedef int (*acpi_op_add)(struct acpi_device *); ++ ++typedef int (*acpi_op_remove)(struct acpi_device *); ++ ++typedef void (*acpi_op_notify)(struct acpi_device *, u32); ++ ++struct acpi_device_ops { ++ acpi_op_add add; ++ acpi_op_remove remove; ++ acpi_op_notify notify; ++}; ++ ++struct acpi_driver { ++ char name[80]; ++ char class[80]; ++ const struct acpi_device_id *ids; ++ unsigned int flags; ++ struct acpi_device_ops ops; ++ struct device_driver drv; ++ struct module *owner; ++}; ++ ++struct acpi_device_perf_state { ++ struct { ++ u8 valid: 1; ++ u8 reserved: 7; ++ } flags; ++ u8 power; ++ u8 performance; ++ int latency; ++}; ++ ++struct acpi_gpio_params; ++ ++struct acpi_gpio_mapping { ++ const char *name; ++ const struct acpi_gpio_params *data; ++ unsigned int size; ++ unsigned int quirks; ++}; ++ ++struct pci_bus; ++ ++struct acpi_pci_root { ++ struct acpi_device *device; ++ struct pci_bus *bus; ++ u16 segment; ++ struct resource secondary; ++ u32 osc_support_set; ++ u32 osc_control_set; ++ phys_addr_t mcfg_addr; ++}; ++ ++typedef short unsigned int pci_bus_flags_t; ++ ++struct pci_dev; ++ ++struct pci_ops; ++ ++struct msi_controller; ++ ++struct pci_bus { ++ struct list_head node; ++ struct pci_bus *parent; ++ struct list_head children; ++ struct list_head devices; ++ struct pci_dev *self; ++ struct list_head slots; ++ struct resource *resource[4]; ++ struct list_head resources; ++ struct resource busn_res; ++ struct pci_ops *ops; ++ struct pci_ops *backup_ops; ++ struct msi_controller *msi; ++ void *sysdata; ++ struct proc_dir_entry *procdir; ++ unsigned char number; ++ unsigned char primary; ++ unsigned char max_bus_speed; ++ unsigned char cur_bus_speed; ++ int domain_nr; ++ char name[48]; ++ short unsigned int bridge_ctl; ++ pci_bus_flags_t bus_flags; ++ struct device *bridge; ++ struct device dev; ++ struct bin_attribute *legacy_io; ++ struct bin_attribute *legacy_mem; ++ unsigned int is_added: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int 
kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct acpi_gpio_params { ++ unsigned int crs_entry_index; ++ unsigned int line_index; ++ bool active_low; ++}; ++ ++struct hotplug_slot; ++ ++struct pci_slot { ++ struct pci_bus *bus; ++ struct list_head list; ++ struct hotplug_slot *hotplug; ++ unsigned char number; ++ struct kobject kobj; ++}; ++ ++enum { ++ PCI_STD_RESOURCES = 0, ++ PCI_STD_RESOURCE_END = 5, ++ PCI_ROM_RESOURCE = 6, ++ PCI_IOV_RESOURCES = 7, ++ PCI_IOV_RESOURCE_END = 12, ++ PCI_BRIDGE_RESOURCES = 13, ++ PCI_BRIDGE_RESOURCE_END = 16, ++ PCI_NUM_RESOURCES = 17, ++ DEVICE_COUNT_RESOURCE = 17, ++}; ++ ++typedef int pci_power_t; ++ ++typedef unsigned int pci_channel_state_t; ++ ++enum pci_channel_state { ++ pci_channel_io_normal = 1, ++ pci_channel_io_frozen = 2, ++ pci_channel_io_perm_failure = 3, ++}; ++ ++typedef unsigned int pcie_reset_state_t; ++ ++typedef short unsigned int pci_dev_flags_t; ++ ++struct aer_stats; ++ ++struct pci_driver; ++ ++struct pcie_link_state; ++ ++struct pci_vpd; ++ ++struct pci_sriov; ++ ++struct pci_dev { ++ struct list_head bus_list; ++ struct pci_bus *bus; ++ struct pci_bus *subordinate; ++ void *sysdata; ++ struct proc_dir_entry *procent; ++ struct pci_slot *slot; ++ unsigned int devfn; ++ short unsigned int vendor; ++ short unsigned int device; ++ short unsigned int subsystem_vendor; ++ short unsigned int subsystem_device; ++ unsigned int class; ++ u8 revision; ++ u8 hdr_type; ++ u16 aer_cap; ++ struct aer_stats *aer_stats; ++ u8 pcie_cap; ++ u8 msi_cap; ++ u8 msix_cap; ++ u8 pcie_mpss: 3; ++ u8 rom_base_reg; ++ u8 pin; ++ u16 pcie_flags_reg; ++ long unsigned int *dma_alias_mask; ++ struct pci_driver *driver; ++ u64 dma_mask; ++ struct device_dma_parameters dma_parms; ++ pci_power_t current_state; ++ u8 pm_cap; ++ unsigned int pme_support: 5; ++ unsigned int pme_poll: 1; ++ unsigned int d1_support: 1; ++ unsigned int d2_support: 1; ++ unsigned int no_d1d2: 1; ++ unsigned int no_d3cold: 1; ++ unsigned int bridge_d3: 1; ++ unsigned int d3cold_allowed: 1; ++ unsigned int mmio_always_on: 1; ++ unsigned int wakeup_prepared: 1; ++ unsigned int runtime_d3cold: 1; ++ unsigned int skip_bus_pm: 1; ++ unsigned int ignore_hotplug: 1; ++ unsigned int hotplug_user_indicators: 1; ++ unsigned int clear_retrain_link: 1; ++ unsigned int d3_delay; ++ unsigned int d3cold_delay; ++ struct pcie_link_state *link_state; ++ unsigned int ltr_path: 1; ++ unsigned int eetlp_prefix_path: 1; ++ pci_channel_state_t error_state; ++ struct device dev; ++ int cfg_size; ++ unsigned int irq; ++ struct resource resource[17]; ++ bool match_driver; ++ unsigned int transparent: 1; ++ unsigned int io_window: 1; ++ unsigned int pref_window: 1; ++ unsigned int pref_64_window: 1; ++ unsigned int multifunction: 1; ++ unsigned int is_busmaster: 1; ++ unsigned int no_msi: 1; ++ unsigned int no_64bit_msi: 1; ++ unsigned int block_cfg_access: 1; ++ unsigned int broken_parity_status: 1; ++ unsigned int irq_reroute_variant: 2; ++ unsigned int msi_enabled: 1; ++ unsigned int msix_enabled: 1; ++ unsigned int ari_enabled: 1; ++ unsigned int ats_enabled: 1; ++ unsigned int pasid_enabled: 1; ++ unsigned int pri_enabled: 1; ++ unsigned int is_managed: 1; ++ unsigned int needs_freset: 1; ++ unsigned int state_saved: 1; ++ unsigned int is_physfn: 1; ++ unsigned int is_virtfn: 1; ++ unsigned int reset_fn: 1; ++ unsigned int is_hotplug_bridge: 1; ++ unsigned int shpc_managed: 1; ++ unsigned int is_thunderbolt: 1; ++ unsigned int __aer_firmware_first_valid: 1; ++ unsigned int 
__aer_firmware_first: 1; ++ unsigned int broken_intx_masking: 1; ++ unsigned int io_window_1k: 1; ++ unsigned int irq_managed: 1; ++ unsigned int has_secondary_link: 1; ++ unsigned int non_compliant_bars: 1; ++ unsigned int is_probed: 1; ++ pci_dev_flags_t dev_flags; ++ atomic_t enable_cnt; ++ u32 saved_config_space[16]; ++ struct hlist_head saved_cap_space; ++ struct bin_attribute *rom_attr; ++ int rom_attr_enabled; ++ struct bin_attribute *res_attr[17]; ++ struct bin_attribute *res_attr_wc[17]; ++ unsigned int broken_cmd_compl: 1; ++ const struct attribute_group **msi_irq_groups; ++ struct pci_vpd *vpd; ++ union { ++ struct pci_sriov *sriov; ++ struct pci_dev *physfn; ++ }; ++ u16 ats_cap; ++ u8 ats_stu; ++ atomic_t ats_ref_cnt; ++ u32 pri_reqs_alloc; ++ u16 pasid_features; ++ phys_addr_t rom; ++ size_t romlen; ++ char *driver_override; ++ long unsigned int priv_flags; ++ long unsigned int slot_being_removed_rescanned; ++ struct pci_dev *rpdev; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++}; ++ ++struct pci_dynids { ++ spinlock_t lock; ++ struct list_head list; ++}; ++ ++struct pci_error_handlers; ++ ++struct pci_driver { ++ struct list_head node; ++ const char *name; ++ const struct pci_device_id *id_table; ++ int (*probe)(struct pci_dev *, const struct pci_device_id *); ++ void (*remove)(struct pci_dev *); ++ int (*suspend)(struct pci_dev *, pm_message_t); ++ int (*suspend_late)(struct pci_dev *, pm_message_t); ++ int (*resume_early)(struct pci_dev *); ++ int (*resume)(struct pci_dev *); ++ void (*shutdown)(struct pci_dev *); ++ int (*sriov_configure)(struct pci_dev *, int); ++ const struct pci_error_handlers *err_handler; ++ const struct attribute_group **groups; ++ struct device_driver driver; ++ struct pci_dynids dynids; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct pci_host_bridge { ++ struct device dev; ++ struct pci_bus *bus; ++ struct pci_ops *ops; ++ void *sysdata; ++ int busnr; ++ struct list_head windows; ++ u8 (*swizzle_irq)(struct pci_dev *, u8 *); ++ int (*map_irq)(const struct pci_dev *, u8, u8); ++ void (*release_fn)(struct pci_host_bridge *); ++ void *release_data; ++ struct msi_controller *msi; ++ unsigned int ignore_reset_delay: 1; ++ unsigned int no_ext_tags: 1; ++ unsigned int native_aer: 1; ++ unsigned int native_pcie_hotplug: 1; ++ unsigned int native_shpc_hotplug: 1; ++ unsigned int native_pme: 1; ++ unsigned int native_ltr: 1; ++ unsigned int preserve_config: 1; ++ resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); ++ long unsigned int private[0]; ++}; ++ ++struct pci_ops { ++ int (*add_bus)(struct pci_bus *); ++ void (*remove_bus)(struct pci_bus *); ++ void * (*map_bus)(struct pci_bus *, unsigned int, int); ++ int 
(*read)(struct pci_bus *, unsigned int, int, int, u32 *); ++ int (*write)(struct pci_bus *, unsigned int, int, int, u32); ++}; ++ ++struct msi_controller { ++ struct module *owner; ++ struct device *dev; ++ struct device_node *of_node; ++ struct list_head list; ++ int (*setup_irq)(struct msi_controller *, struct pci_dev *, struct msi_desc *); ++ int (*setup_irqs)(struct msi_controller *, struct pci_dev *, int, int); ++ void (*teardown_irq)(struct msi_controller *, unsigned int); ++}; ++ ++typedef unsigned int pci_ers_result_t; ++ ++struct pci_error_handlers { ++ pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state); ++ pci_ers_result_t (*mmio_enabled)(struct pci_dev *); ++ pci_ers_result_t (*slot_reset)(struct pci_dev *); ++ void (*reset_prepare)(struct pci_dev *); ++ void (*reset_done)(struct pci_dev *); ++ void (*resume)(struct pci_dev *); ++}; ++ ++enum pcie_bus_config_types { ++ PCIE_BUS_TUNE_OFF = 0, ++ PCIE_BUS_DEFAULT = 1, ++ PCIE_BUS_SAFE = 2, ++ PCIE_BUS_PERFORMANCE = 3, ++ PCIE_BUS_PEER2PEER = 4, ++}; ++ ++struct msi_msg { ++ u32 address_lo; ++ u32 address_hi; ++ u32 data; ++}; ++ ++struct platform_msi_priv_data; ++ ++struct platform_msi_desc { ++ struct platform_msi_priv_data *msi_priv_data; ++ u16 msi_index; ++}; ++ ++struct fsl_mc_msi_desc { ++ u16 msi_index; ++}; ++ ++struct msi_desc { ++ struct list_head list; ++ unsigned int irq; ++ unsigned int nvec_used; ++ struct device *dev; ++ struct msi_msg msg; ++ struct cpumask *affinity; ++ union { ++ struct { ++ u32 masked; ++ struct { ++ __u8 is_msix: 1; ++ __u8 multiple: 3; ++ __u8 multi_cap: 3; ++ __u8 maskbit: 1; ++ __u8 is_64: 1; ++ __u16 entry_nr; ++ unsigned int default_irq; ++ } msi_attrib; ++ union { ++ u8 mask_pos; ++ void *mask_base; ++ }; ++ }; ++ struct platform_msi_desc platform; ++ struct fsl_mc_msi_desc fsl_mc; ++ }; ++}; ++ ++struct acpi_pci_root_ops; ++ ++struct acpi_pci_root_info { ++ struct acpi_pci_root *root; ++ struct acpi_device *bridge; ++ struct acpi_pci_root_ops *ops; ++ struct list_head resources; ++ char name[16]; ++}; ++ ++struct acpi_pci_root_ops { ++ struct pci_ops *pci_ops; ++ int (*init_info)(struct acpi_pci_root_info *); ++ void (*release_info)(struct acpi_pci_root_info *); ++ int (*prepare_resources)(struct acpi_pci_root_info *); ++}; ++ ++struct pci_config_window; ++ ++struct pci_ecam_ops { ++ unsigned int bus_shift; ++ struct pci_ops pci_ops; ++ int (*init)(struct pci_config_window *); ++}; ++ ++struct pci_config_window { ++ struct resource res; ++ struct resource busr; ++ void *priv; ++ struct pci_ecam_ops *ops; ++ union { ++ void *win; ++ void **winp; ++ }; ++ struct device *parent; ++}; ++ ++struct acpi_pci_generic_root_info { ++ struct acpi_pci_root_info common; ++ struct pci_config_window *cfg; ++}; ++ ++enum irq_domain_bus_token { ++ DOMAIN_BUS_ANY = 0, ++ DOMAIN_BUS_WIRED = 1, ++ DOMAIN_BUS_PCI_MSI = 2, ++ DOMAIN_BUS_PLATFORM_MSI = 3, ++ DOMAIN_BUS_NEXUS = 4, ++ DOMAIN_BUS_IPI = 5, ++ DOMAIN_BUS_FSL_MC_MSI = 6, ++}; ++ ++struct irq_domain_ops; ++ ++struct irq_domain_chip_generic; ++ ++struct irq_domain { ++ struct list_head link; ++ const char *name; ++ const struct irq_domain_ops *ops; ++ void *host_data; ++ unsigned int flags; ++ unsigned int mapcount; ++ struct fwnode_handle *fwnode; ++ enum irq_domain_bus_token bus_token; ++ struct irq_domain_chip_generic *gc; ++ struct irq_domain *parent; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ irq_hw_number_t 
hwirq_max; ++ unsigned int revmap_direct_max_irq; ++ unsigned int revmap_size; ++ struct radix_tree_root revmap_tree; ++ struct mutex revmap_tree_mutex; ++ unsigned int linear_revmap[0]; ++}; ++ ++typedef u32 acpi_status; ++ ++typedef irqreturn_t (*irq_handler_t)(int, void *); ++ ++struct irqaction { ++ irq_handler_t handler; ++ void *dev_id; ++ void *percpu_dev_id; ++ struct irqaction *next; ++ irq_handler_t thread_fn; ++ struct task_struct *thread; ++ struct irqaction *secondary; ++ unsigned int irq; ++ unsigned int flags; ++ long unsigned int thread_flags; ++ long unsigned int thread_mask; ++ const char *name; ++ struct proc_dir_entry *dir; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct irq_affinity_notify { ++ unsigned int irq; ++ struct kref kref; ++ struct work_struct work; ++ void (*notify)(struct irq_affinity_notify *, const cpumask_t *); ++ void (*release)(struct kref *); ++}; ++ ++struct irq_desc; ++ ++typedef void (*irq_flow_handler_t)(struct irq_desc *); ++ ++struct irq_desc { ++ struct irq_common_data irq_common_data; ++ struct irq_data irq_data; ++ unsigned int *kstat_irqs; ++ irq_flow_handler_t handle_irq; ++ struct irqaction *action; ++ unsigned int status_use_accessors; ++ unsigned int core_internal_state__do_not_mess_with_it; ++ unsigned int depth; ++ unsigned int wake_depth; ++ unsigned int tot_count; ++ unsigned int irq_count; ++ long unsigned int last_unhandled; ++ unsigned int irqs_unhandled; ++ atomic_t threads_handled; ++ int threads_handled_last; ++ raw_spinlock_t lock; ++ struct cpumask *percpu_enabled; ++ const struct cpumask *percpu_affinity; ++ const struct cpumask *affinity_hint; ++ struct irq_affinity_notify *affinity_notify; ++ long unsigned int threads_oneshot; ++ atomic_t threads_active; ++ wait_queue_head_t wait_for_threads; ++ unsigned int nr_actions; ++ unsigned int no_suspend_depth; ++ unsigned int cond_suspend_depth; ++ unsigned int force_resume_depth; ++ struct proc_dir_entry *dir; ++ struct callback_head rcu; ++ struct kobject kobj; ++ struct mutex request_mutex; ++ int parent_irq; ++ struct module *owner; ++ const char *name; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct irq_chip_regs { ++ long unsigned int enable; ++ long unsigned int disable; ++ long unsigned int mask; ++ long unsigned int ack; ++ long unsigned int eoi; ++ long unsigned int type; ++ long unsigned int polarity; ++}; ++ ++struct irq_chip_type { ++ struct irq_chip chip; ++ struct irq_chip_regs regs; ++ irq_flow_handler_t handler; ++ u32 type; ++ u32 mask_cache_priv; ++ u32 *mask_cache; ++}; ++ ++struct irq_chip_generic { ++ raw_spinlock_t lock; ++ void *reg_base; ++ u32 (*reg_readl)(void *); ++ void (*reg_writel)(u32, void *); ++ void (*suspend)(struct irq_chip_generic *); ++ void (*resume)(struct irq_chip_generic *); ++ unsigned int irq_base; ++ unsigned int irq_cnt; ++ u32 mask_cache; ++ u32 type_cache; ++ u32 polarity_cache; ++ u32 wake_enabled; ++ u32 wake_active; ++ unsigned int num_ct; ++ void *private; ++ long unsigned int installed; ++ long unsigned int unused; ++ struct irq_domain *domain; ++ struct list_head list; ++ struct irq_chip_type chip_types[0]; ++}; ++ ++enum irq_gc_flags { ++ IRQ_GC_INIT_MASK_CACHE = 1, ++ IRQ_GC_INIT_NESTED_LOCK = 2, ++ IRQ_GC_MASK_CACHE_PER_TYPE = 4, ++ IRQ_GC_NO_MASK = 8, ++ IRQ_GC_BE_IO = 16, ++}; ++ ++struct irq_domain_chip_generic { ++ unsigned int irqs_per_chip; ++ unsigned int num_chips; ++ unsigned int irq_flags_to_clear; ++ unsigned int irq_flags_to_set; ++ enum irq_gc_flags gc_flags; ++ 
struct irq_chip_generic *gc[0]; ++}; ++ ++struct irq_fwspec { ++ struct fwnode_handle *fwnode; ++ int param_count; ++ u32 param[16]; ++}; ++ ++struct irq_domain_ops { ++ int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); ++ int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); ++ int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); ++ void (*unmap)(struct irq_domain *, unsigned int); ++ int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); ++ int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); ++ void (*free)(struct irq_domain *, unsigned int, unsigned int); ++ int (*activate)(struct irq_domain *, struct irq_data *, bool); ++ void (*deactivate)(struct irq_domain *, struct irq_data *); ++ int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); ++}; ++ ++struct circ_buf { ++ char *buf; ++ int head; ++ int tail; ++}; ++ ++struct serial_icounter_struct { ++ int cts; ++ int dsr; ++ int rng; ++ int dcd; ++ int rx; ++ int tx; ++ int frame; ++ int overrun; ++ int parity; ++ int brk; ++ int buf_overrun; ++ int reserved[9]; ++}; ++ ++struct serial_struct { ++ int type; ++ int line; ++ unsigned int port; ++ int irq; ++ int flags; ++ int xmit_fifo_size; ++ int custom_divisor; ++ int baud_base; ++ short unsigned int close_delay; ++ char io_type; ++ char reserved_char[1]; ++ int hub6; ++ short unsigned int closing_wait; ++ short unsigned int closing_wait2; ++ unsigned char *iomem_base; ++ short unsigned int iomem_reg_shift; ++ unsigned int port_high; ++ long unsigned int iomap_base; ++}; ++ ++struct serial_rs485 { ++ __u32 flags; ++ __u32 delay_rts_before_send; ++ __u32 delay_rts_after_send; ++ __u32 padding[5]; ++}; ++ ++struct uart_port; ++ ++struct uart_ops { ++ unsigned int (*tx_empty)(struct uart_port *); ++ void (*set_mctrl)(struct uart_port *, unsigned int); ++ unsigned int (*get_mctrl)(struct uart_port *); ++ void (*stop_tx)(struct uart_port *); ++ void (*start_tx)(struct uart_port *); ++ void (*throttle)(struct uart_port *); ++ void (*unthrottle)(struct uart_port *); ++ void (*send_xchar)(struct uart_port *, char); ++ void (*stop_rx)(struct uart_port *); ++ void (*enable_ms)(struct uart_port *); ++ void (*break_ctl)(struct uart_port *, int); ++ int (*startup)(struct uart_port *); ++ void (*shutdown)(struct uart_port *); ++ void (*flush_buffer)(struct uart_port *); ++ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); ++ void (*set_ldisc)(struct uart_port *, struct ktermios *); ++ void (*pm)(struct uart_port *, unsigned int, unsigned int); ++ const char * (*type)(struct uart_port *); ++ void (*release_port)(struct uart_port *); ++ int (*request_port)(struct uart_port *); ++ void (*config_port)(struct uart_port *, int); ++ int (*verify_port)(struct uart_port *, struct serial_struct *); ++ int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); ++ int (*poll_init)(struct uart_port *); ++ void (*poll_put_char)(struct uart_port *, unsigned char); ++ int (*poll_get_char)(struct uart_port *); ++}; ++ ++struct uart_icount { ++ __u32 cts; ++ __u32 dsr; ++ __u32 rng; ++ __u32 dcd; ++ __u32 rx; ++ __u32 tx; ++ __u32 frame; ++ __u32 overrun; ++ __u32 parity; ++ __u32 brk; ++ __u32 buf_overrun; ++}; ++ ++typedef unsigned int upf_t; ++ ++typedef unsigned int upstat_t; ++ ++struct uart_state; ++ ++struct uart_port { ++ spinlock_t lock; ++ long unsigned int iobase; ++ 
unsigned char *membase; ++ unsigned int (*serial_in)(struct uart_port *, int); ++ void (*serial_out)(struct uart_port *, int, int); ++ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); ++ void (*set_ldisc)(struct uart_port *, struct ktermios *); ++ unsigned int (*get_mctrl)(struct uart_port *); ++ void (*set_mctrl)(struct uart_port *, unsigned int); ++ unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); ++ void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); ++ int (*startup)(struct uart_port *); ++ void (*shutdown)(struct uart_port *); ++ void (*throttle)(struct uart_port *); ++ void (*unthrottle)(struct uart_port *); ++ int (*handle_irq)(struct uart_port *); ++ void (*pm)(struct uart_port *, unsigned int, unsigned int); ++ void (*handle_break)(struct uart_port *); ++ int (*rs485_config)(struct uart_port *, struct serial_rs485 *); ++ unsigned int irq; ++ long unsigned int irqflags; ++ unsigned int uartclk; ++ unsigned int fifosize; ++ unsigned char x_char; ++ unsigned char regshift; ++ unsigned char iotype; ++ unsigned char quirks; ++ unsigned int read_status_mask; ++ unsigned int ignore_status_mask; ++ struct uart_state *state; ++ struct uart_icount icount; ++ struct console *cons; ++ long unsigned int sysrq; ++ unsigned int sysrq_ch; ++ upf_t flags; ++ upstat_t status; ++ int hw_stopped; ++ unsigned int mctrl; ++ unsigned int timeout; ++ unsigned int type; ++ const struct uart_ops *ops; ++ unsigned int custom_divisor; ++ unsigned int line; ++ unsigned int minor; ++ resource_size_t mapbase; ++ resource_size_t mapsize; ++ struct device *dev; ++ unsigned char hub6; ++ unsigned char suspended; ++ unsigned char unused[2]; ++ const char *name; ++ struct attribute_group *attr_group; ++ const struct attribute_group **tty_groups; ++ struct serial_rs485 rs485; ++ void *private_data; ++}; ++ ++enum uart_pm_state { ++ UART_PM_STATE_ON = 0, ++ UART_PM_STATE_OFF = 3, ++ UART_PM_STATE_UNDEFINED = 4, ++}; ++ ++struct uart_state { ++ struct tty_port port; ++ enum uart_pm_state pm_state; ++ struct circ_buf xmit; ++ atomic_t refcount; ++ wait_queue_head_t remove_wait; ++ struct uart_port *uart_port; ++}; ++ ++struct earlycon_device { ++ struct console *con; ++ struct uart_port port; ++ char options[16]; ++ unsigned int baud; ++}; ++ ++struct earlycon_id { ++ char name[15]; ++ char name_term; ++ char compatible[128]; ++ int (*setup)(struct earlycon_device *, const char *); ++}; ++ ++struct cpuidle_driver___2; ++ ++enum { ++ CPER_SEV_RECOVERABLE = 0, ++ CPER_SEV_FATAL = 1, ++ CPER_SEV_CORRECTED = 2, ++ CPER_SEV_INFORMATIONAL = 3, ++}; ++ ++struct cper_sec_proc_arm { ++ __u32 validation_bits; ++ __u16 err_info_num; ++ __u16 context_info_num; ++ __u32 section_length; ++ __u8 affinity_level; ++ __u8 reserved[3]; ++ __u64 mpidr; ++ __u64 midr; ++ __u32 running_state; ++ __u32 psci_state; ++}; ++ ++struct cper_arm_err_info { ++ __u8 version; ++ __u8 length; ++ __u16 validation_bits; ++ __u8 type; ++ __u16 multiple_error; ++ __u8 flags; ++ __u64 error_info; ++ __u64 virt_fault_addr; ++ __u64 physical_fault_addr; ++} __attribute__((packed)); ++ ++struct cper_sec_mem_err { ++ __u64 validation_bits; ++ __u64 error_status; ++ __u64 physical_addr; ++ __u64 physical_addr_mask; ++ __u16 node; ++ __u16 card; ++ __u16 module; ++ __u16 bank; ++ __u16 device; ++ __u16 row; ++ __u16 column; ++ __u16 bit_pos; ++ __u64 requestor_id; ++ __u64 responder_id; ++ __u64 target_id; ++ __u8 error_type; ++ __u8 reserved; ++ __u16 rank; ++ __u16 
mem_array_handle; ++ __u16 mem_dev_handle; ++}; ++ ++enum mf_flags { ++ MF_COUNT_INCREASED = 1, ++ MF_ACTION_REQUIRED = 2, ++ MF_MUST_KILL = 4, ++ MF_SOFT_OFFLINE = 8, ++}; ++ ++struct acpi_hest_header { ++ u16 type; ++ u16 source_id; ++}; ++ ++struct acpi_hest_notify { ++ u8 type; ++ u8 length; ++ u16 config_write_enable; ++ u32 poll_interval; ++ u32 vector; ++ u32 polling_threshold_value; ++ u32 polling_threshold_window; ++ u32 error_threshold_value; ++ u32 error_threshold_window; ++}; ++ ++enum acpi_hest_notify_types { ++ ACPI_HEST_NOTIFY_POLLED = 0, ++ ACPI_HEST_NOTIFY_EXTERNAL = 1, ++ ACPI_HEST_NOTIFY_LOCAL = 2, ++ ACPI_HEST_NOTIFY_SCI = 3, ++ ACPI_HEST_NOTIFY_NMI = 4, ++ ACPI_HEST_NOTIFY_CMCI = 5, ++ ACPI_HEST_NOTIFY_MCE = 6, ++ ACPI_HEST_NOTIFY_GPIO = 7, ++ ACPI_HEST_NOTIFY_SEA = 8, ++ ACPI_HEST_NOTIFY_SEI = 9, ++ ACPI_HEST_NOTIFY_GSIV = 10, ++ ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, ++ ACPI_HEST_NOTIFY_RESERVED = 12, ++}; ++ ++struct acpi_hest_generic { ++ struct acpi_hest_header header; ++ u16 related_source_id; ++ u8 reserved; ++ u8 enabled; ++ u32 records_to_preallocate; ++ u32 max_sections_per_record; ++ u32 max_raw_data_length; ++ struct acpi_generic_address error_status_address; ++ struct acpi_hest_notify notify; ++ u32 error_block_length; ++} __attribute__((packed)); ++ ++struct acpi_hest_generic_v2 { ++ struct acpi_hest_header header; ++ u16 related_source_id; ++ u8 reserved; ++ u8 enabled; ++ u32 records_to_preallocate; ++ u32 max_sections_per_record; ++ u32 max_raw_data_length; ++ struct acpi_generic_address error_status_address; ++ struct acpi_hest_notify notify; ++ u32 error_block_length; ++ struct acpi_generic_address read_ack_register; ++ u64 read_ack_preserve; ++ u64 read_ack_write; ++} __attribute__((packed)); ++ ++struct acpi_hest_generic_status { ++ u32 block_status; ++ u32 raw_data_offset; ++ u32 raw_data_length; ++ u32 data_length; ++ u32 error_severity; ++}; ++ ++struct ghes { ++ union { ++ struct acpi_hest_generic *generic; ++ struct acpi_hest_generic_v2 *generic_v2; ++ }; ++ struct acpi_hest_generic_status *estatus; ++ u64 buffer_paddr; ++ long unsigned int flags; ++ union { ++ struct list_head list; ++ struct timer_list timer; ++ unsigned int irq; ++ }; ++}; ++ ++struct ghes_mem_err { ++ int notify_type; ++ int severity; ++ struct cper_sec_mem_err *mem_err; ++}; ++ ++struct sea_info { ++ atomic_t inuse; ++ struct task_struct *t; ++ __u64 paddr; ++}; ++ ++enum acpi_srat_type { ++ ACPI_SRAT_TYPE_CPU_AFFINITY = 0, ++ ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, ++ ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, ++ ACPI_SRAT_TYPE_GICC_AFFINITY = 3, ++ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, ++ ACPI_SRAT_TYPE_RESERVED = 5, ++}; ++ ++struct acpi_srat_gicc_affinity { ++ struct acpi_subtable_header header; ++ u32 proximity_domain; ++ u32 acpi_processor_uid; ++ u32 flags; ++ u32 clock_domain; ++} __attribute__((packed)); ++ ++struct parking_protocol_mailbox { ++ __le32 cpu_id; ++ __le32 reserved; ++ __le64 entry_point; ++}; ++ ++struct cpu_mailbox_entry { ++ struct parking_protocol_mailbox *mailbox; ++ phys_addr_t mailbox_addr; ++ u8 version; ++ u8 gic_cpu_id; ++}; ++ ++struct pv_time_ops { ++ long long unsigned int (*steal_clock)(int); ++}; ++ ++typedef __be64 fdt64_t; ++ ++struct kobj_attribute___2; ++ ++typedef u64 pudval_t; ++ ++struct arch_hibernate_hdr_invariants { ++ char uts_version[65]; ++}; ++ ++struct arch_hibernate_hdr { ++ struct arch_hibernate_hdr_invariants invariants; ++ phys_addr_t ttbr1_el1; ++ void (*reenter_kernel)(); ++ phys_addr_t __hyp_stub_vectors; ++ u64 
sleep_cpu_mpidr; ++}; ++ ++enum { ++ IRQD_TRIGGER_MASK = 15, ++ IRQD_SETAFFINITY_PENDING = 256, ++ IRQD_ACTIVATED = 512, ++ IRQD_NO_BALANCING = 1024, ++ IRQD_PER_CPU = 2048, ++ IRQD_AFFINITY_SET = 4096, ++ IRQD_LEVEL = 8192, ++ IRQD_WAKEUP_STATE = 16384, ++ IRQD_MOVE_PCNTXT = 32768, ++ IRQD_IRQ_DISABLED = 65536, ++ IRQD_IRQ_MASKED = 131072, ++ IRQD_IRQ_INPROGRESS = 262144, ++ IRQD_WAKEUP_ARMED = 524288, ++ IRQD_FORWARDED_TO_VCPU = 1048576, ++ IRQD_AFFINITY_MANAGED = 2097152, ++ IRQD_IRQ_STARTED = 4194304, ++ IRQD_MANAGED_SHUTDOWN = 8388608, ++ IRQD_SINGLE_TARGET = 16777216, ++ IRQD_DEFAULT_TRIGGER_SET = 33554432, ++ IRQD_CAN_RESERVE = 67108864, ++ IRQD_MSI_NOMASK_QUIRK = 134217728, ++ IRQD_AFFINITY_ON_ACTIVATE = 536870912, ++}; ++ ++enum { ++ MEMREMAP_WB = 1, ++ MEMREMAP_WT = 2, ++ MEMREMAP_WC = 4, ++ MEMREMAP_ENC = 8, ++ MEMREMAP_DEC = 16, ++}; ++ ++enum sdei_conduit_types { ++ CONDUIT_INVALID = 0, ++ CONDUIT_SMC = 1, ++ CONDUIT_HVC = 2, ++}; ++ ++typedef int sdei_event_callback(u32, struct pt_regs *, void *); ++ ++struct sdei_registered_event { ++ struct pt_regs interrupted_regs; ++ sdei_event_callback *callback; ++ void *callback_arg; ++ u32 event_num; ++ u8 priority; ++}; ++ ++struct __va_list { ++ void *__stack; ++ void *__gr_top; ++ void *__vr_top; ++ int __gr_offs; ++ int __vr_offs; ++}; ++ ++typedef struct __va_list __gnuc_va_list; ++ ++typedef __gnuc_va_list va_list; ++ ++struct resctrl_resource { ++ int rid; ++ bool alloc_enabled; ++ bool mon_enabled; ++ bool alloc_capable; ++ bool mon_capable; ++ char *name; ++ struct list_head domains; ++ struct list_head evt_list; ++ long unsigned int fflags; ++ void *res; ++}; ++ ++struct rftype { ++ char *name; ++ umode_t mode; ++ struct kernfs_ops *kf_ops; ++ long unsigned int flags; ++ long unsigned int fflags; ++ bool (*enable)(void *); ++ int (*seq_show)(struct kernfs_open_file *, struct seq_file *, void *); ++ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); ++}; ++ ++enum rdt_group_type { ++ RDTCTRL_GROUP = 0, ++ RDTMON_GROUP = 1, ++ RDT_NUM_GROUP = 2, ++}; ++ ++struct rdtgroup; ++ ++struct mongroup { ++ struct kernfs_node *mon_data_kn; ++ struct rdtgroup *parent; ++ struct list_head crdtgrp_list; ++ u32 rmid; ++ u32 mon; ++ int init; ++}; ++ ++struct rdtgroup { ++ struct kernfs_node *kn; ++ struct list_head resctrl_group_list; ++ u32 closid; ++ struct cpumask cpu_mask; ++ int flags; ++ atomic_t waitcount; ++ enum rdt_group_type type; ++ struct mongroup mon; ++}; ++ ++struct mpam_node { ++ u32 component_id; ++ u8 type; ++ u64 addr; ++ void *base; ++ struct cpumask cpu_mask; ++ u64 default_ctrl; ++ char *cpus_list; ++ char *name; ++ struct list_head list; ++}; ++ ++struct rdt_domain { ++ struct list_head list; ++ int id; ++ struct cpumask cpu_mask; ++ void *base; ++ u32 *ctrl_val; ++ u32 new_ctrl; ++ bool have_new_ctrl; ++ char *cpus_list; ++}; ++ ++enum { ++ MPAM_RESOURCE_SMMU = 0, ++ MPAM_RESOURCE_CACHE = 1, ++ MPAM_RESOURCE_MC = 2, ++ MPAM_NUM_RESOURCES = 3, ++}; ++ ++struct raw_resctrl_resource { ++ int num_partid; ++ u32 default_ctrl; ++ void (*msr_update)(struct rdt_domain *, int); ++ u64 (*msr_read)(struct rdt_domain *, int); ++ int data_width; ++ const char *format_str; ++ int (*parse_ctrlval)(char *, struct raw_resctrl_resource *, struct rdt_domain *); ++ int num_pmg; ++ int num_mon; ++ u64 (*mon_read)(struct rdt_domain *, struct rdtgroup *); ++ int (*mon_write)(struct rdt_domain *, struct rdtgroup *, bool); ++}; ++ ++struct task_move_callback { ++ struct callback_head work; ++ struct rdtgroup *rdtgrp; ++}; 
++ ++union mon_data_bits { ++ void *priv; ++ struct { ++ u8 rid; ++ u8 domid; ++ u8 partid; ++ u8 pmg; ++ } u; ++}; ++ ++struct kretprobe_instance; ++ ++typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); ++ ++struct kretprobe; ++ ++struct kretprobe_instance { ++ struct hlist_node hlist; ++ struct kretprobe *rp; ++ kprobe_opcode_t *ret_addr; ++ struct task_struct *task; ++ void *fp; ++ char data[0]; ++}; ++ ++struct kretprobe { ++ struct kprobe kp; ++ kretprobe_handler_t handler; ++ kretprobe_handler_t entry_handler; ++ int maxactive; ++ int nmissed; ++ size_t data_size; ++ struct hlist_head free_instances; ++ raw_spinlock_t lock; ++}; ++ ++enum probe_insn { ++ INSN_REJECTED = 0, ++ INSN_GOOD_NO_SLOT = 1, ++ INSN_GOOD = 2, ++}; ++ ++enum aarch64_insn_special_register { ++ AARCH64_INSN_SPCLREG_SPSR_EL1 = 49664, ++ AARCH64_INSN_SPCLREG_ELR_EL1 = 49665, ++ AARCH64_INSN_SPCLREG_SP_EL0 = 49672, ++ AARCH64_INSN_SPCLREG_SPSEL = 49680, ++ AARCH64_INSN_SPCLREG_CURRENTEL = 49682, ++ AARCH64_INSN_SPCLREG_DAIF = 55825, ++ AARCH64_INSN_SPCLREG_NZCV = 55824, ++ AARCH64_INSN_SPCLREG_FPCR = 55840, ++ AARCH64_INSN_SPCLREG_DSPSR_EL0 = 55848, ++ AARCH64_INSN_SPCLREG_DLR_EL0 = 55849, ++ AARCH64_INSN_SPCLREG_SPSR_EL2 = 57856, ++ AARCH64_INSN_SPCLREG_ELR_EL2 = 57857, ++ AARCH64_INSN_SPCLREG_SP_EL1 = 57864, ++ AARCH64_INSN_SPCLREG_SPSR_INQ = 57880, ++ AARCH64_INSN_SPCLREG_SPSR_ABT = 57881, ++ AARCH64_INSN_SPCLREG_SPSR_UND = 57882, ++ AARCH64_INSN_SPCLREG_SPSR_FIQ = 57883, ++ AARCH64_INSN_SPCLREG_SPSR_EL3 = 61952, ++ AARCH64_INSN_SPCLREG_ELR_EL3 = 61953, ++ AARCH64_INSN_SPCLREG_SP_EL2 = 61968, ++}; ++ ++struct arch_uprobe { ++ union { ++ u8 insn[4]; ++ u8 ixol[4]; ++ }; ++ struct arch_probe_insn api; ++ bool simulate; ++}; ++ ++enum rp_check { ++ RP_CHECK_CALL = 0, ++ RP_CHECK_CHAIN_CALL = 1, ++ RP_CHECK_RET = 2, ++}; ++ ++struct iommu_fwspec { ++ const struct iommu_ops *ops; ++ struct fwnode_handle *iommu_fwnode; ++ void *iommu_priv; ++ u32 flags; ++ unsigned int num_ids; ++ unsigned int num_pasid_bits; ++ bool can_stall; ++ u32 ids[1]; ++}; ++ ++struct iommu_fault_param; ++ ++struct iopf_device_param; ++ ++struct iommu_param { ++ struct mutex lock; ++ struct iommu_fault_param *fault_param; ++ struct iommu_sva_param *sva_param; ++ struct iopf_device_param *iopf_param; ++}; ++ ++struct gen_pool; ++ ++typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); ++ ++struct gen_pool { ++ spinlock_t lock; ++ struct list_head chunks; ++ int min_alloc_order; ++ genpool_algo_t algo; ++ void *data; ++ const char *name; ++}; ++ ++enum swiotlb_force { ++ SWIOTLB_NORMAL = 0, ++ SWIOTLB_FORCE = 1, ++ SWIOTLB_NO_FORCE = 2, ++}; ++ ++struct of_phandle_args { ++ struct device_node *np; ++ int args_count; ++ uint32_t args[16]; ++}; ++ ++struct pasid_table_config { ++ __u32 version; ++ __u32 bytes; ++ __u64 base_ptr; ++ __u8 pasid_bits; ++}; ++ ++enum iommu_inv_granularity { ++ IOMMU_INV_GRANU_DOMAIN = 1, ++ IOMMU_INV_GRANU_DEVICE = 2, ++ IOMMU_INV_GRANU_DOMAIN_PAGE = 3, ++ IOMMU_INV_GRANU_ALL_PASID = 4, ++ IOMMU_INV_GRANU_PASID_SEL = 5, ++ IOMMU_INV_GRANU_NG_ALL_PASID = 6, ++ IOMMU_INV_GRANU_NG_PASID = 7, ++ IOMMU_INV_GRANU_PAGE_PASID = 8, ++ IOMMU_INV_NR_GRANU = 9, ++}; ++ ++enum iommu_inv_type { ++ IOMMU_INV_TYPE_DTLB = 0, ++ IOMMU_INV_TYPE_TLB = 1, ++ IOMMU_INV_TYPE_PASID = 2, ++ IOMMU_INV_TYPE_CONTEXT = 3, ++ IOMMU_INV_NR_TYPE = 4, ++}; ++ ++struct tlb_invalidate_hdr { ++ __u32 version; ++ 
enum iommu_inv_type type; ++}; ++ ++struct tlb_invalidate_info { ++ struct tlb_invalidate_hdr hdr; ++ enum iommu_inv_granularity granularity; ++ __u32 flags; ++ __u8 size; ++ __u32 pasid; ++ __u64 addr; ++}; ++ ++typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); ++ ++struct iommu_domain_geometry { ++ dma_addr_t aperture_start; ++ dma_addr_t aperture_end; ++ bool force_aperture; ++}; ++ ++struct iommu_domain { ++ unsigned int type; ++ const struct iommu_ops *ops; ++ long unsigned int pgsize_bitmap; ++ iommu_fault_handler_t handler; ++ void *handler_token; ++ struct iommu_domain_geometry geometry; ++ void *iova_cookie; ++ struct list_head mm_list; ++}; ++ ++struct iommu_fault_event; ++ ++typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault_event *, void *); ++ ++enum iommu_fault_type { ++ IOMMU_FAULT_DMA_UNRECOV = 1, ++ IOMMU_FAULT_PAGE_REQ = 2, ++}; ++ ++enum iommu_fault_reason { ++ IOMMU_FAULT_REASON_UNKNOWN = 0, ++ IOMMU_FAULT_REASON_INTERNAL = 1, ++ IOMMU_FAULT_REASON_PASID_FETCH = 2, ++ IOMMU_FAULT_REASON_PASID_INVALID = 3, ++ IOMMU_FAULT_REASON_PGD_FETCH = 4, ++ IOMMU_FAULT_REASON_PTE_FETCH = 5, ++ IOMMU_FAULT_REASON_PERMISSION = 6, ++}; ++ ++struct iommu_fault_event { ++ struct list_head list; ++ enum iommu_fault_type type; ++ enum iommu_fault_reason reason; ++ u64 addr; ++ u32 pasid; ++ u32 page_req_group_id; ++ u32 last_req: 1; ++ u32 pasid_valid: 1; ++ u32 prot; ++ u64 device_private; ++ u64 iommu_private; ++ u64 expire; ++}; ++ ++typedef int (*iommu_mm_exit_handler_t)(struct device *, int, void *); ++ ++struct io_mm { ++ int pasid; ++ long unsigned int flags; ++ struct list_head devices; ++ struct kref kref; ++ struct mmu_notifier notifier; ++ struct mm_struct *mm; ++ void (*release)(struct io_mm *); ++ struct callback_head rcu; ++}; ++ ++enum iommu_resv_type { ++ IOMMU_RESV_DIRECT = 0, ++ IOMMU_RESV_RESERVED = 1, ++ IOMMU_RESV_MSI = 2, ++ IOMMU_RESV_SW_MSI = 3, ++}; ++ ++struct iommu_resv_region { ++ struct list_head list; ++ phys_addr_t start; ++ size_t length; ++ int prot; ++ enum iommu_resv_type type; ++}; ++ ++enum page_response_code { ++ IOMMU_PAGE_RESP_SUCCESS = 0, ++ IOMMU_PAGE_RESP_INVALID = 1, ++ IOMMU_PAGE_RESP_FAILURE = 2, ++}; ++ ++struct page_response_msg { ++ u64 addr; ++ u32 pasid; ++ enum page_response_code resp_code; ++ u32 pasid_present: 1; ++ u32 page_req_group_id; ++ u64 private_data; ++}; ++ ++struct iommu_sva_param { ++ long unsigned int features; ++ unsigned int min_pasid; ++ unsigned int max_pasid; ++ struct list_head mm_list; ++ iommu_mm_exit_handler_t mm_exit; ++}; ++ ++struct iommu_fault_param { ++ iommu_dev_fault_handler_t handler; ++ struct list_head faults; ++ struct timer_list timer; ++ struct mutex lock; ++ void *data; ++}; ++ ++struct hstate { ++ int next_nid_to_alloc; ++ int next_nid_to_free; ++ unsigned int order; ++ long unsigned int mask; ++ long unsigned int max_huge_pages; ++ long unsigned int nr_huge_pages; ++ long unsigned int free_huge_pages; ++ long unsigned int resv_huge_pages; ++ long unsigned int surplus_huge_pages; ++ long unsigned int nr_overcommit_huge_pages; ++ struct list_head hugepage_activelist; ++ struct list_head hugepage_freelists[16]; ++ unsigned int nr_huge_pages_node[16]; ++ unsigned int free_huge_pages_node[16]; ++ unsigned int surplus_huge_pages_node[16]; ++ unsigned int resv_huge_pages_node[16]; ++ struct cftype cgroup_files[5]; ++ char name[32]; ++}; ++ ++struct fault_info { ++ int (*fn)(long unsigned int, unsigned int, struct pt_regs *); ++ int sig; 
++ int code; ++ const char *name; ++}; ++ ++typedef __be32 fdt32_t; ++ ++struct fdt_header { ++ fdt32_t magic; ++ fdt32_t totalsize; ++ fdt32_t off_dt_struct; ++ fdt32_t off_dt_strings; ++ fdt32_t off_mem_rsvmap; ++ fdt32_t version; ++ fdt32_t last_comp_version; ++ fdt32_t boot_cpuid_phys; ++ fdt32_t size_dt_strings; ++ fdt32_t size_dt_struct; ++}; ++ ++typedef void ttbr_replace_func(phys_addr_t); ++ ++struct page_change_data { ++ pgprot_t set_mask; ++ pgprot_t clear_mask; ++}; ++ ++struct hugepage_subpool { ++ spinlock_t lock; ++ long int count; ++ long int max_hpages; ++ long int used_hpages; ++ struct hstate *hstate; ++ long int min_hpages; ++ long int rsv_hpages; ++}; ++ ++struct hugetlbfs_sb_info { ++ long int max_inodes; ++ long int free_inodes; ++ spinlock_t stat_lock; ++ struct hstate *hstate; ++ struct hugepage_subpool *spool; ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++}; ++ ++enum { ++ BPF_REG_0 = 0, ++ BPF_REG_1 = 1, ++ BPF_REG_2 = 2, ++ BPF_REG_3 = 3, ++ BPF_REG_4 = 4, ++ BPF_REG_5 = 5, ++ BPF_REG_6 = 6, ++ BPF_REG_7 = 7, ++ BPF_REG_8 = 8, ++ BPF_REG_9 = 9, ++ BPF_REG_10 = 10, ++ __MAX_BPF_REG = 11, ++}; ++ ++struct bpf_cgroup_storage_key { ++ __u64 cgroup_inode_id; ++ __u32 attach_type; ++}; ++ ++enum bpf_map_type { ++ BPF_MAP_TYPE_UNSPEC = 0, ++ BPF_MAP_TYPE_HASH = 1, ++ BPF_MAP_TYPE_ARRAY = 2, ++ BPF_MAP_TYPE_PROG_ARRAY = 3, ++ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, ++ BPF_MAP_TYPE_PERCPU_HASH = 5, ++ BPF_MAP_TYPE_PERCPU_ARRAY = 6, ++ BPF_MAP_TYPE_STACK_TRACE = 7, ++ BPF_MAP_TYPE_CGROUP_ARRAY = 8, ++ BPF_MAP_TYPE_LRU_HASH = 9, ++ BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, ++ BPF_MAP_TYPE_LPM_TRIE = 11, ++ BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, ++ BPF_MAP_TYPE_HASH_OF_MAPS = 13, ++ BPF_MAP_TYPE_DEVMAP = 14, ++ BPF_MAP_TYPE_SOCKMAP = 15, ++ BPF_MAP_TYPE_CPUMAP = 16, ++ BPF_MAP_TYPE_XSKMAP = 17, ++ BPF_MAP_TYPE_SOCKHASH = 18, ++ BPF_MAP_TYPE_CGROUP_STORAGE = 19, ++ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, ++}; ++ ++union bpf_attr { ++ struct { ++ __u32 map_type; ++ __u32 key_size; ++ __u32 value_size; ++ __u32 max_entries; ++ __u32 map_flags; ++ __u32 inner_map_fd; ++ __u32 numa_node; ++ char map_name[16]; ++ __u32 map_ifindex; ++ __u32 btf_fd; ++ __u32 btf_key_type_id; ++ __u32 btf_value_type_id; ++ }; ++ struct { ++ __u32 map_fd; ++ __u64 key; ++ union { ++ __u64 value; ++ __u64 next_key; ++ }; ++ __u64 flags; ++ }; ++ struct { ++ __u32 prog_type; ++ __u32 insn_cnt; ++ __u64 insns; ++ __u64 license; ++ __u32 log_level; ++ __u32 log_size; ++ __u64 log_buf; ++ __u32 kern_version; ++ __u32 prog_flags; ++ char prog_name[16]; ++ __u32 prog_ifindex; ++ __u32 expected_attach_type; ++ }; ++ struct { ++ __u64 pathname; ++ __u32 bpf_fd; ++ __u32 file_flags; ++ }; ++ struct { ++ __u32 target_fd; ++ __u32 attach_bpf_fd; ++ __u32 attach_type; ++ __u32 attach_flags; ++ }; ++ struct { ++ __u32 prog_fd; ++ __u32 retval; ++ __u32 data_size_in; ++ __u32 data_size_out; ++ __u64 data_in; ++ __u64 data_out; ++ __u32 repeat; ++ __u32 duration; ++ } test; ++ struct { ++ union { ++ __u32 start_id; ++ __u32 prog_id; ++ __u32 map_id; ++ __u32 btf_id; ++ }; ++ __u32 next_id; ++ __u32 open_flags; ++ }; ++ struct { ++ __u32 bpf_fd; ++ __u32 info_len; ++ __u64 info; ++ } info; ++ struct { ++ __u32 target_fd; ++ __u32 attach_type; ++ __u32 query_flags; ++ __u32 attach_flags; ++ __u64 prog_ids; ++ __u32 prog_cnt; ++ } query; ++ struct { ++ __u64 name; ++ __u32 prog_fd; ++ } raw_tracepoint; ++ struct { ++ __u64 btf; ++ __u64 btf_log_buf; ++ __u32 btf_size; ++ __u32 btf_log_size; ++ __u32 btf_log_level; ++ }; ++ struct { 
++ __u32 pid; ++ __u32 fd; ++ __u32 flags; ++ __u32 buf_len; ++ __u64 buf; ++ __u32 prog_id; ++ __u32 fd_type; ++ __u64 probe_offset; ++ __u64 probe_addr; ++ } task_fd_query; ++}; ++ ++enum bpf_func_id { ++ BPF_FUNC_unspec = 0, ++ BPF_FUNC_map_lookup_elem = 1, ++ BPF_FUNC_map_update_elem = 2, ++ BPF_FUNC_map_delete_elem = 3, ++ BPF_FUNC_probe_read = 4, ++ BPF_FUNC_ktime_get_ns = 5, ++ BPF_FUNC_trace_printk = 6, ++ BPF_FUNC_get_prandom_u32 = 7, ++ BPF_FUNC_get_smp_processor_id = 8, ++ BPF_FUNC_skb_store_bytes = 9, ++ BPF_FUNC_l3_csum_replace = 10, ++ BPF_FUNC_l4_csum_replace = 11, ++ BPF_FUNC_tail_call = 12, ++ BPF_FUNC_clone_redirect = 13, ++ BPF_FUNC_get_current_pid_tgid = 14, ++ BPF_FUNC_get_current_uid_gid = 15, ++ BPF_FUNC_get_current_comm = 16, ++ BPF_FUNC_get_cgroup_classid = 17, ++ BPF_FUNC_skb_vlan_push = 18, ++ BPF_FUNC_skb_vlan_pop = 19, ++ BPF_FUNC_skb_get_tunnel_key = 20, ++ BPF_FUNC_skb_set_tunnel_key = 21, ++ BPF_FUNC_perf_event_read = 22, ++ BPF_FUNC_redirect = 23, ++ BPF_FUNC_get_route_realm = 24, ++ BPF_FUNC_perf_event_output = 25, ++ BPF_FUNC_skb_load_bytes = 26, ++ BPF_FUNC_get_stackid = 27, ++ BPF_FUNC_csum_diff = 28, ++ BPF_FUNC_skb_get_tunnel_opt = 29, ++ BPF_FUNC_skb_set_tunnel_opt = 30, ++ BPF_FUNC_skb_change_proto = 31, ++ BPF_FUNC_skb_change_type = 32, ++ BPF_FUNC_skb_under_cgroup = 33, ++ BPF_FUNC_get_hash_recalc = 34, ++ BPF_FUNC_get_current_task = 35, ++ BPF_FUNC_probe_write_user = 36, ++ BPF_FUNC_current_task_under_cgroup = 37, ++ BPF_FUNC_skb_change_tail = 38, ++ BPF_FUNC_skb_pull_data = 39, ++ BPF_FUNC_csum_update = 40, ++ BPF_FUNC_set_hash_invalid = 41, ++ BPF_FUNC_get_numa_node_id = 42, ++ BPF_FUNC_skb_change_head = 43, ++ BPF_FUNC_xdp_adjust_head = 44, ++ BPF_FUNC_probe_read_str = 45, ++ BPF_FUNC_get_socket_cookie = 46, ++ BPF_FUNC_get_socket_uid = 47, ++ BPF_FUNC_set_hash = 48, ++ BPF_FUNC_setsockopt = 49, ++ BPF_FUNC_skb_adjust_room = 50, ++ BPF_FUNC_redirect_map = 51, ++ BPF_FUNC_sk_redirect_map = 52, ++ BPF_FUNC_sock_map_update = 53, ++ BPF_FUNC_xdp_adjust_meta = 54, ++ BPF_FUNC_perf_event_read_value = 55, ++ BPF_FUNC_perf_prog_read_value = 56, ++ BPF_FUNC_getsockopt = 57, ++ BPF_FUNC_override_return = 58, ++ BPF_FUNC_sock_ops_cb_flags_set = 59, ++ BPF_FUNC_msg_redirect_map = 60, ++ BPF_FUNC_msg_apply_bytes = 61, ++ BPF_FUNC_msg_cork_bytes = 62, ++ BPF_FUNC_msg_pull_data = 63, ++ BPF_FUNC_bind = 64, ++ BPF_FUNC_xdp_adjust_tail = 65, ++ BPF_FUNC_skb_get_xfrm_state = 66, ++ BPF_FUNC_get_stack = 67, ++ BPF_FUNC_skb_load_bytes_relative = 68, ++ BPF_FUNC_fib_lookup = 69, ++ BPF_FUNC_sock_hash_update = 70, ++ BPF_FUNC_msg_redirect_hash = 71, ++ BPF_FUNC_sk_redirect_hash = 72, ++ BPF_FUNC_lwt_push_encap = 73, ++ BPF_FUNC_lwt_seg6_store_bytes = 74, ++ BPF_FUNC_lwt_seg6_adjust_srh = 75, ++ BPF_FUNC_lwt_seg6_action = 76, ++ BPF_FUNC_rc_repeat = 77, ++ BPF_FUNC_rc_keydown = 78, ++ BPF_FUNC_skb_cgroup_id = 79, ++ BPF_FUNC_get_current_cgroup_id = 80, ++ BPF_FUNC_get_local_storage = 81, ++ BPF_FUNC_sk_select_reuseport = 82, ++ BPF_FUNC_skb_ancestor_cgroup_id = 83, ++ __BPF_FUNC_MAX_ID = 84, ++}; ++ ++enum { ++ DUMP_PREFIX_NONE = 0, ++ DUMP_PREFIX_ADDRESS = 1, ++ DUMP_PREFIX_OFFSET = 2, ++}; ++ ++struct btf_type; ++ ++struct bpf_map_ops { ++ int (*map_alloc_check)(union bpf_attr *); ++ struct bpf_map * (*map_alloc)(union bpf_attr *); ++ void (*map_release)(struct bpf_map *, struct file *); ++ void (*map_free)(struct bpf_map *); ++ int (*map_get_next_key)(struct bpf_map *, void *, void *); ++ void (*map_release_uref)(struct bpf_map *); ++ void * 
(*map_lookup_elem_sys_only)(struct bpf_map *, void *); ++ void * (*map_lookup_elem)(struct bpf_map *, void *); ++ int (*map_update_elem)(struct bpf_map *, void *, void *, u64); ++ int (*map_delete_elem)(struct bpf_map *, void *); ++ void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); ++ void (*map_fd_put_ptr)(void *); ++ u32 (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); ++ u32 (*map_fd_sys_lookup_elem)(void *); ++ void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); ++ int (*map_check_btf)(const struct bpf_map *, const struct btf_type *, const struct btf_type *); ++}; ++ ++struct btf; ++ ++struct bpf_map { ++ const struct bpf_map_ops *ops; ++ struct bpf_map *inner_map_meta; ++ void *security; ++ enum bpf_map_type map_type; ++ u32 key_size; ++ u32 value_size; ++ u32 max_entries; ++ u32 map_flags; ++ u32 pages; ++ u32 id; ++ int numa_node; ++ u32 btf_key_type_id; ++ u32 btf_value_type_id; ++ struct btf *btf; ++ bool unpriv_array; ++ long: 56; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct user_struct *user; ++ atomic_t refcnt; ++ atomic_t usercnt; ++ struct work_struct work; ++ char name[16]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct btf_type { ++ __u32 name_off; ++ __u32 info; ++ union { ++ __u32 size; ++ __u32 type; ++ }; ++}; ++ ++struct bpf_map_dev_ops { ++ int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); ++ int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); ++ int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); ++ int (*map_delete_elem)(struct bpf_offloaded_map *, void *); ++}; ++ ++struct bpf_offloaded_map { ++ struct bpf_map map; ++ struct net_device *netdev; ++ const struct bpf_map_dev_ops *dev_ops; ++ void *dev_priv; ++ struct list_head offloads; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++enum bpf_arg_type { ++ ARG_DONTCARE = 0, ++ ARG_CONST_MAP_PTR = 1, ++ ARG_PTR_TO_MAP_KEY = 2, ++ ARG_PTR_TO_MAP_VALUE = 3, ++ ARG_PTR_TO_MEM = 4, ++ ARG_PTR_TO_MEM_OR_NULL = 5, ++ ARG_PTR_TO_UNINIT_MEM = 6, ++ ARG_CONST_SIZE = 7, ++ ARG_CONST_SIZE_OR_ZERO = 8, ++ ARG_PTR_TO_CTX = 9, ++ ARG_ANYTHING = 10, ++}; ++ ++enum bpf_return_type { ++ RET_INTEGER = 0, ++ RET_VOID = 1, ++ RET_PTR_TO_MAP_VALUE = 2, ++ RET_PTR_TO_MAP_VALUE_OR_NULL = 3, ++}; ++ ++struct bpf_func_proto { ++ u64 (*func)(u64, u64, u64, u64, u64); ++ bool gpl_only; ++ bool pkt_access; ++ enum bpf_return_type ret_type; ++ enum bpf_arg_type arg1_type; ++ enum bpf_arg_type arg2_type; ++ enum bpf_arg_type arg3_type; ++ enum bpf_arg_type arg4_type; ++ enum bpf_arg_type arg5_type; ++}; ++ ++enum bpf_access_type { ++ BPF_READ = 1, ++ BPF_WRITE = 2, ++}; ++ ++enum bpf_reg_type { ++ NOT_INIT = 0, ++ SCALAR_VALUE = 1, ++ PTR_TO_CTX = 2, ++ CONST_PTR_TO_MAP = 3, ++ PTR_TO_MAP_VALUE = 4, ++ PTR_TO_MAP_VALUE_OR_NULL = 5, ++ PTR_TO_STACK = 6, ++ PTR_TO_PACKET_META = 7, ++ PTR_TO_PACKET = 8, ++ PTR_TO_PACKET_END = 9, ++}; ++ ++struct bpf_insn_access_aux { ++ enum bpf_reg_type reg_type; ++ int ctx_field_size; ++}; ++ ++struct bpf_prog_ops { ++ int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); ++}; ++ ++struct bpf_verifier_ops { ++ const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); ++ bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); ++ int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); ++ int (*gen_ld_abs)(const struct bpf_insn *, 
struct bpf_insn *); ++ u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); ++}; ++ ++struct bpf_prog_offload { ++ struct bpf_prog *prog; ++ struct net_device *netdev; ++ void *dev_priv; ++ struct list_head offloads; ++ bool dev_state; ++ const struct bpf_prog_offload_ops *dev_ops; ++ void *jited_image; ++ u32 jited_len; ++}; ++ ++struct bpf_prog_aux { ++ atomic_t refcnt; ++ u32 used_map_cnt; ++ u32 max_ctx_offset; ++ u32 stack_depth; ++ u32 id; ++ u32 func_cnt; ++ bool offload_requested; ++ struct bpf_prog **func; ++ void *jit_data; ++ struct latch_tree_node ksym_tnode; ++ struct list_head ksym_lnode; ++ const struct bpf_prog_ops *ops; ++ struct bpf_map **used_maps; ++ struct bpf_prog *prog; ++ struct user_struct *user; ++ u64 load_time; ++ struct bpf_map *cgroup_storage; ++ char name[16]; ++ void *security; ++ struct bpf_prog_offload *offload; ++ union { ++ struct work_struct work; ++ struct callback_head rcu; ++ }; ++}; ++ ++struct bpf_cgroup_storage; ++ ++struct bpf_prog_array_item { ++ struct bpf_prog *prog; ++ struct bpf_cgroup_storage *cgroup_storage; ++}; ++ ++struct bpf_storage_buffer; ++ ++struct bpf_cgroup_storage_map; ++ ++struct bpf_cgroup_storage { ++ struct bpf_storage_buffer *buf; ++ struct bpf_cgroup_storage_map *map; ++ struct bpf_cgroup_storage_key key; ++ struct list_head list; ++ struct rb_node node; ++ struct callback_head rcu; ++}; ++ ++struct bpf_prog_array { ++ struct callback_head rcu; ++ struct bpf_prog_array_item items[0]; ++}; ++ ++struct bpf_storage_buffer { ++ struct callback_head rcu; ++ char data[0]; ++}; ++ ++struct bpf_binary_header { ++ u32 pages; ++ u8 image[0]; ++}; ++ ++struct jit_ctx { ++ const struct bpf_prog *prog; ++ int idx; ++ int epilogue_offset; ++ int *offset; ++ __le32 *image; ++ u32 stack_size; ++}; ++ ++struct arm64_jit_data { ++ struct bpf_binary_header *header; ++ u8 *image; ++ struct jit_ctx ctx; ++}; ++ ++typedef u64 hfn_t; ++ ++typedef hfn_t kvm_pfn_t; ++ ++struct gfn_to_hva_cache { ++ u64 generation; ++ gpa_t gpa; ++ long unsigned int hva; ++ long unsigned int len; ++ struct kvm_memory_slot *memslot; ++}; ++ ++struct trace_print_flags { ++ long unsigned int mask; ++ const char *name; ++}; ++ ++enum kobject_action { ++ KOBJ_ADD = 0, ++ KOBJ_REMOVE = 1, ++ KOBJ_CHANGE = 2, ++ KOBJ_MOVE = 3, ++ KOBJ_ONLINE = 4, ++ KOBJ_OFFLINE = 5, ++ KOBJ_BIND = 6, ++ KOBJ_UNBIND = 7, ++ KOBJ_MAX = 8, ++}; ++ ++struct swait_queue { ++ struct task_struct *task; ++ struct list_head task_list; ++}; ++ ++struct kvm_sregs {}; ++ ++struct kvm_fpu {}; ++ ++struct kvm_userspace_memory_region { ++ __u32 slot; ++ __u32 flags; ++ __u64 guest_phys_addr; ++ __u64 memory_size; ++ __u64 userspace_addr; ++}; ++ ++struct kvm_coalesced_mmio_zone { ++ __u64 addr; ++ __u32 size; ++ __u32 pad; ++}; ++ ++struct kvm_translation { ++ __u64 linear_address; ++ __u64 physical_address; ++ __u8 valid; ++ __u8 writeable; ++ __u8 usermode; ++ __u8 pad[5]; ++}; ++ ++struct kvm_dirty_log { ++ __u32 slot; ++ __u32 padding1; ++ union { ++ void *dirty_bitmap; ++ __u64 padding2; ++ }; ++}; ++ ++struct kvm_signal_mask { ++ __u32 len; ++ __u8 sigset[0]; ++}; ++ ++struct kvm_mp_state { ++ __u32 mp_state; ++}; ++ ++struct kvm_guest_debug { ++ __u32 control; ++ __u32 pad; ++ struct kvm_guest_debug_arch arch; ++}; ++ ++struct kvm_ioeventfd { ++ __u64 datamatch; ++ __u64 addr; ++ __u32 len; ++ __s32 fd; ++ __u32 flags; ++ __u8 pad[36]; ++}; ++ ++struct kvm_irq_routing_irqchip { ++ __u32 irqchip; ++ __u32 pin; ++}; ++ ++struct 
kvm_irq_routing_msi { ++ __u32 address_lo; ++ __u32 address_hi; ++ __u32 data; ++ union { ++ __u32 pad; ++ __u32 devid; ++ }; ++}; ++ ++struct kvm_irq_routing_s390_adapter { ++ __u64 ind_addr; ++ __u64 summary_addr; ++ __u64 ind_offset; ++ __u32 summary_offset; ++ __u32 adapter_id; ++}; ++ ++struct kvm_irq_routing_hv_sint { ++ __u32 vcpu; ++ __u32 sint; ++}; ++ ++struct kvm_irq_routing_entry { ++ __u32 gsi; ++ __u32 type; ++ __u32 flags; ++ __u32 pad; ++ union { ++ struct kvm_irq_routing_irqchip irqchip; ++ struct kvm_irq_routing_msi msi; ++ struct kvm_irq_routing_s390_adapter adapter; ++ struct kvm_irq_routing_hv_sint hv_sint; ++ __u32 pad[8]; ++ } u; ++}; ++ ++struct kvm_irq_routing { ++ __u32 nr; ++ __u32 flags; ++ struct kvm_irq_routing_entry entries[0]; ++}; ++ ++struct kvm_irqfd { ++ __u32 fd; ++ __u32 gsi; ++ __u32 flags; ++ __u32 resamplefd; ++ __u8 pad[16]; ++}; ++ ++struct kvm_msi { ++ __u32 address_lo; ++ __u32 address_hi; ++ __u32 data; ++ __u32 flags; ++ __u32 devid; ++ __u8 pad[12]; ++}; ++ ++struct kvm_create_device { ++ __u32 type; ++ __u32 fd; ++ __u32 flags; ++}; ++ ++enum kvm_device_type { ++ KVM_DEV_TYPE_FSL_MPIC_20 = 1, ++ KVM_DEV_TYPE_FSL_MPIC_42 = 2, ++ KVM_DEV_TYPE_XICS = 3, ++ KVM_DEV_TYPE_VFIO = 4, ++ KVM_DEV_TYPE_ARM_VGIC_V2 = 5, ++ KVM_DEV_TYPE_FLIC = 6, ++ KVM_DEV_TYPE_ARM_VGIC_V3 = 7, ++ KVM_DEV_TYPE_ARM_VGIC_ITS = 8, ++ KVM_DEV_TYPE_MAX = 9, ++}; ++ ++enum { ++ OUTSIDE_GUEST_MODE = 0, ++ IN_GUEST_MODE = 1, ++ EXITING_GUEST_MODE = 2, ++ READING_SHADOW_PAGE_TABLES = 3, ++}; ++ ++enum kvm_mr_change { ++ KVM_MR_CREATE = 0, ++ KVM_MR_DELETE = 1, ++ KVM_MR_MOVE = 2, ++ KVM_MR_FLAGS_ONLY = 3, ++}; ++ ++typedef int (*kvm_vm_thread_fn_t)(struct kvm *, uintptr_t); ++ ++struct miscdevice { ++ int minor; ++ const char *name; ++ const struct file_operations *fops; ++ struct list_head list; ++ struct device *parent; ++ struct device *this_device; ++ const struct attribute_group **groups; ++ const char *nodename; ++ umode_t mode; ++}; ++ ++struct syscore_ops { ++ struct list_head node; ++ int (*suspend)(); ++ void (*resume)(); ++ void (*shutdown)(); ++}; ++ ++struct trace_event_raw_kvm_userspace_exit { ++ struct trace_entry ent; ++ __u32 reason; ++ int errno; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_vcpu_wakeup { ++ struct trace_entry ent; ++ __u64 ns; ++ bool waited; ++ bool valid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_set_irq { ++ struct trace_entry ent; ++ unsigned int gsi; ++ int level; ++ int irq_source_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_ack_irq { ++ struct trace_entry ent; ++ unsigned int irqchip; ++ unsigned int pin; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_mmio { ++ struct trace_entry ent; ++ u32 type; ++ u32 len; ++ u64 gpa; ++ u64 val; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_fpu { ++ struct trace_entry ent; ++ u32 load; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_age_page { ++ struct trace_entry ent; ++ u64 hva; ++ u64 gfn; ++ u8 level; ++ u8 referenced; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_halt_poll_ns { ++ struct trace_entry ent; ++ bool grow; ++ unsigned int vcpu_id; ++ unsigned int new; ++ unsigned int old; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_kvm_userspace_exit {}; ++ ++struct trace_event_data_offsets_kvm_vcpu_wakeup {}; ++ ++struct trace_event_data_offsets_kvm_set_irq {}; ++ ++struct trace_event_data_offsets_kvm_ack_irq {}; ++ ++struct trace_event_data_offsets_kvm_mmio {}; ++ ++struct trace_event_data_offsets_kvm_fpu 
{}; ++ ++struct trace_event_data_offsets_kvm_age_page {}; ++ ++struct trace_event_data_offsets_kvm_halt_poll_ns {}; ++ ++struct kvm_vm_worker_thread_context { ++ struct kvm *kvm; ++ struct task_struct *parent; ++ struct completion init_done; ++ kvm_vm_thread_fn_t thread_fn; ++ uintptr_t data; ++ int err; ++}; ++ ++struct kvm_coalesced_mmio_dev { ++ struct list_head list; ++ struct kvm_io_device dev; ++ struct kvm *kvm; ++ struct kvm_coalesced_mmio_zone zone; ++}; ++ ++typedef struct wait_queue_entry wait_queue_entry_t; ++ ++struct irq_bypass_consumer; ++ ++struct irq_bypass_producer { ++ struct list_head node; ++ void *token; ++ int irq; ++ int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); ++ void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); ++ void (*stop)(struct irq_bypass_producer *); ++ void (*start)(struct irq_bypass_producer *); ++}; ++ ++struct irq_bypass_consumer { ++ struct list_head node; ++ void *token; ++ int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); ++ void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); ++ void (*stop)(struct irq_bypass_consumer *); ++ void (*start)(struct irq_bypass_consumer *); ++}; ++ ++enum { ++ kvm_ioeventfd_flag_nr_datamatch = 0, ++ kvm_ioeventfd_flag_nr_pio = 1, ++ kvm_ioeventfd_flag_nr_deassign = 2, ++ kvm_ioeventfd_flag_nr_virtio_ccw_notify = 3, ++ kvm_ioeventfd_flag_nr_fast_mmio = 4, ++ kvm_ioeventfd_flag_nr_max = 5, ++}; ++ ++struct kvm_s390_adapter_int { ++ u64 ind_addr; ++ u64 summary_addr; ++ u64 ind_offset; ++ u32 summary_offset; ++ u32 adapter_id; ++}; ++ ++struct kvm_hv_sint { ++ u32 vcpu; ++ u32 sint; ++}; ++ ++struct kvm_kernel_irq_routing_entry { ++ u32 gsi; ++ u32 type; ++ int (*set)(struct kvm_kernel_irq_routing_entry *, struct kvm *, int, int, bool); ++ union { ++ struct { ++ unsigned int irqchip; ++ unsigned int pin; ++ } irqchip; ++ struct { ++ u32 address_lo; ++ u32 address_hi; ++ u32 data; ++ u32 flags; ++ u32 devid; ++ } msi; ++ struct kvm_s390_adapter_int adapter; ++ struct kvm_hv_sint hv_sint; ++ }; ++ struct hlist_node link; ++}; ++ ++struct kvm_irq_ack_notifier { ++ struct hlist_node link; ++ unsigned int gsi; ++ void (*irq_acked)(struct kvm_irq_ack_notifier *); ++}; ++ ++typedef struct poll_table_struct poll_table; ++ ++struct kvm_kernel_irqfd_resampler { ++ struct kvm *kvm; ++ struct list_head list; ++ struct kvm_irq_ack_notifier notifier; ++ struct list_head link; ++}; ++ ++struct kvm_kernel_irqfd { ++ struct kvm *kvm; ++ wait_queue_entry_t wait; ++ struct kvm_kernel_irq_routing_entry irq_entry; ++ seqcount_t irq_entry_sc; ++ int gsi; ++ struct work_struct inject; ++ struct kvm_kernel_irqfd_resampler *resampler; ++ struct eventfd_ctx *resamplefd; ++ struct list_head resampler_link; ++ struct eventfd_ctx *eventfd; ++ struct list_head list; ++ poll_table pt; ++ struct work_struct shutdown; ++ struct irq_bypass_consumer consumer; ++ struct irq_bypass_producer *producer; ++}; ++ ++struct fd { ++ struct file *file; ++ unsigned int flags; ++}; ++ ++struct _ioeventfd { ++ struct list_head list; ++ u64 addr; ++ int length; ++ struct eventfd_ctx *eventfd; ++ u64 datamatch; ++ struct kvm_io_device dev; ++ u8 bus_idx; ++ bool wildcard; ++}; ++ ++struct vfio_group; ++ ++struct kvm_vfio_group { ++ struct list_head node; ++ struct vfio_group *vfio_group; ++}; ++ ++struct kvm_vfio { ++ struct list_head group_list; ++ struct mutex lock; ++ bool noncoherent; ++}; ++ ++struct kvm_vcpu_init { ++ __u32 target; ++ __u32 
features[7]; ++}; ++ ++struct kvm_vcpu_events { ++ struct { ++ __u8 serror_pending; ++ __u8 serror_has_esr; ++ __u8 pad[6]; ++ __u64 serror_esr; ++ } exception; ++ __u32 reserved[12]; ++}; ++ ++struct kvm_reg_list { ++ __u64 n; ++ __u64 reg[0]; ++}; ++ ++struct kvm_one_reg { ++ __u64 id; ++ __u64 addr; ++}; ++ ++struct kvm_arm_device_addr { ++ __u64 id; ++ __u64 addr; ++}; ++ ++struct trace_event_raw_kvm_entry { ++ struct trace_entry ent; ++ unsigned int vcpu_id; ++ long unsigned int vcpu_pc; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_exit { ++ struct trace_entry ent; ++ int ret; ++ unsigned int esr_ec; ++ long unsigned int vcpu_pc; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_guest_fault { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ long unsigned int hsr; ++ long unsigned int hxfar; ++ long long unsigned int ipa; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_access_fault { ++ struct trace_entry ent; ++ long unsigned int ipa; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_irq_line { ++ struct trace_entry ent; ++ unsigned int type; ++ int vcpu_idx; ++ int irq_num; ++ int level; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_mmio_emulate { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ long unsigned int instr; ++ long unsigned int cpsr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_unmap_hva_range { ++ struct trace_entry ent; ++ long unsigned int start; ++ long unsigned int end; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_set_spte_hva { ++ struct trace_entry ent; ++ long unsigned int hva; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_age_hva { ++ struct trace_entry ent; ++ long unsigned int start; ++ long unsigned int end; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_test_age_hva { ++ struct trace_entry ent; ++ long unsigned int hva; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_set_way_flush { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ bool cache; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_toggle_cache { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ bool was; ++ bool now; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_timer_update_irq { ++ struct trace_entry ent; ++ long unsigned int vcpu_id; ++ __u32 irq; ++ int level; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_kvm_entry {}; ++ ++struct trace_event_data_offsets_kvm_exit {}; ++ ++struct trace_event_data_offsets_kvm_guest_fault {}; ++ ++struct trace_event_data_offsets_kvm_access_fault {}; ++ ++struct trace_event_data_offsets_kvm_irq_line {}; ++ ++struct trace_event_data_offsets_kvm_mmio_emulate {}; ++ ++struct trace_event_data_offsets_kvm_unmap_hva_range {}; ++ ++struct trace_event_data_offsets_kvm_set_spte_hva {}; ++ ++struct trace_event_data_offsets_kvm_age_hva {}; ++ ++struct trace_event_data_offsets_kvm_test_age_hva {}; ++ ++struct trace_event_data_offsets_kvm_set_way_flush {}; ++ ++struct trace_event_data_offsets_kvm_toggle_cache {}; ++ ++struct trace_event_data_offsets_kvm_timer_update_irq {}; ++ ++typedef long unsigned int hva_t; ++ ++enum exception_type { ++ except_type_sync = 0, ++ except_type_irq = 128, ++ except_type_fiq = 256, ++ except_type_serror = 384, ++}; ++ ++struct sys_reg_params; ++ ++struct sys_reg_desc { ++ const char *name; ++ u8 Op0; ++ u8 Op1; ++ u8 CRn; ++ u8 CRm; ++ u8 Op2; ++ bool (*access)(struct kvm_vcpu *, struct sys_reg_params *, const struct sys_reg_desc *); ++ void (*reset)(struct kvm_vcpu *, const struct 
sys_reg_desc *); ++ int reg; ++ u64 val; ++ int (*__get_user)(struct kvm_vcpu *, const struct sys_reg_desc *, const struct kvm_one_reg *, void *); ++ int (*set_user)(struct kvm_vcpu *, const struct sys_reg_desc *, const struct kvm_one_reg *, void *); ++}; ++ ++struct sys_reg_params { ++ u8 Op0; ++ u8 Op1; ++ u8 CRn; ++ u8 CRm; ++ u8 Op2; ++ u64 regval; ++ bool is_write; ++ bool is_aarch32; ++ bool is_32bit; ++}; ++ ++struct trace_event_raw_kvm_trap_enter { ++ struct trace_entry ent; ++ unsigned int vcpu_id; ++ unsigned int esr_ec; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_trap_exit { ++ struct trace_entry ent; ++ unsigned int vcpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_wfx_arm64 { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ bool is_wfe; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_hvc_arm64 { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ long unsigned int r0; ++ long unsigned int imm; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_arm_setup_debug { ++ struct trace_entry ent; ++ struct kvm_vcpu *vcpu; ++ __u32 guest_debug; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_arm_clear_debug { ++ struct trace_entry ent; ++ __u32 guest_debug; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_arm_set_dreg32 { ++ struct trace_entry ent; ++ const char *name; ++ __u32 value; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_arm_set_regset { ++ struct trace_entry ent; ++ const char *name; ++ int len; ++ u64 ctrls[16]; ++ u64 values[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_trap_reg { ++ struct trace_entry ent; ++ const char *fn; ++ int reg; ++ bool is_write; ++ u64 write_value; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_handle_sys_reg { ++ struct trace_entry ent; ++ long unsigned int hsr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_sys_access { ++ struct trace_entry ent; ++ long unsigned int vcpu_pc; ++ bool is_write; ++ const char *name; ++ u8 Op0; ++ u8 Op1; ++ u8 CRn; ++ u8 CRm; ++ u8 Op2; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kvm_set_guest_debug { ++ struct trace_entry ent; ++ struct kvm_vcpu *vcpu; ++ __u32 guest_debug; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_kvm_trap_enter {}; ++ ++struct trace_event_data_offsets_kvm_trap_exit {}; ++ ++struct trace_event_data_offsets_kvm_wfx_arm64 {}; ++ ++struct trace_event_data_offsets_kvm_hvc_arm64 {}; ++ ++struct trace_event_data_offsets_kvm_arm_setup_debug {}; ++ ++struct trace_event_data_offsets_kvm_arm_clear_debug {}; ++ ++struct trace_event_data_offsets_kvm_arm_set_dreg32 {}; ++ ++struct trace_event_data_offsets_kvm_arm_set_regset {}; ++ ++struct trace_event_data_offsets_trap_reg {}; ++ ++struct trace_event_data_offsets_kvm_handle_sys_reg {}; ++ ++struct trace_event_data_offsets_kvm_sys_access {}; ++ ++struct trace_event_data_offsets_kvm_set_guest_debug {}; ++ ++typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); ++ ++struct kvm_sys_reg_table { ++ const struct sys_reg_desc *table; ++ size_t num; ++}; ++ ++struct kvm_sys_reg_target_table { ++ struct kvm_sys_reg_table table64; ++ struct kvm_sys_reg_table table32; ++}; ++ ++struct vgic_vmcr { ++ u32 grpen0; ++ u32 grpen1; ++ u32 ackctl; ++ u32 fiqen; ++ u32 cbpr; ++ u32 eoim; ++ u32 abpr; ++ u32 bpr; ++ u32 pmr; ++}; ++ ++struct trace_event_raw_vgic_update_irq_pending { ++ struct trace_entry ent; ++ long unsigned int vcpu_id; ++ __u32 irq; ++ bool level; ++ char __data[0]; ++}; ++ ++struct 
trace_event_data_offsets_vgic_update_irq_pending {}; ++ ++enum gic_type { ++ GIC_V2 = 0, ++ GIC_V3 = 1, ++}; ++ ++struct gic_kvm_info { ++ enum gic_type type; ++ struct resource vcpu; ++ unsigned int maint_irq; ++ struct resource vctrl; ++ bool has_v4; ++}; ++ ++enum { ++ IRQ_TYPE_NONE = 0, ++ IRQ_TYPE_EDGE_RISING = 1, ++ IRQ_TYPE_EDGE_FALLING = 2, ++ IRQ_TYPE_EDGE_BOTH = 3, ++ IRQ_TYPE_LEVEL_HIGH = 4, ++ IRQ_TYPE_LEVEL_LOW = 8, ++ IRQ_TYPE_LEVEL_MASK = 12, ++ IRQ_TYPE_SENSE_MASK = 15, ++ IRQ_TYPE_DEFAULT = 15, ++ IRQ_TYPE_PROBE = 16, ++ IRQ_LEVEL = 256, ++ IRQ_PER_CPU = 512, ++ IRQ_NOPROBE = 1024, ++ IRQ_NOREQUEST = 2048, ++ IRQ_NOAUTOEN = 4096, ++ IRQ_NO_BALANCING = 8192, ++ IRQ_MOVE_PCNTXT = 16384, ++ IRQ_NESTED_THREAD = 32768, ++ IRQ_NOTHREAD = 65536, ++ IRQ_PER_CPU_DEVID = 131072, ++ IRQ_IS_POLLED = 262144, ++ IRQ_DISABLE_UNLAZY = 524288, ++}; ++ ++struct its_vlpi_map { ++ struct its_vm *vm; ++ struct its_vpe *vpe; ++ u32 vintid; ++ u8 properties; ++ bool db_enabled; ++}; ++ ++struct vgic_reg_attr { ++ struct kvm_vcpu *vcpu; ++ gpa_t addr; ++}; ++ ++struct its_device { ++ struct list_head dev_list; ++ struct list_head itt_head; ++ u32 num_eventid_bits; ++ gpa_t itt_addr; ++ u32 device_id; ++}; ++ ++struct its_collection { ++ struct list_head coll_list; ++ u32 collection_id; ++ u32 target_addr; ++}; ++ ++struct its_ite { ++ struct list_head ite_list; ++ struct vgic_irq *irq; ++ struct its_collection *collection; ++ u32 event_id; ++}; ++ ++struct vgic_translation_cache_entry { ++ struct list_head entry; ++ phys_addr_t db; ++ u32 devid; ++ u32 eventid; ++ struct vgic_irq *irq; ++}; ++ ++struct vgic_its_abi { ++ int cte_esz; ++ int dte_esz; ++ int ite_esz; ++ int (*save_tables)(struct vgic_its *); ++ int (*restore_tables)(struct vgic_its *); ++ int (*commit)(struct vgic_its *); ++}; ++ ++typedef int (*entry_fn_t)(struct vgic_its *, u32, void *, void *); ++ ++struct vgic_state_iter { ++ int nr_cpus; ++ int nr_spis; ++ int nr_lpis; ++ int dist_id; ++ int vcpu_id; ++ int intid; ++ int lpi_idx; ++ u32 *lpi_array; ++}; ++ ++struct cyclecounter { ++ u64 (*read)(const struct cyclecounter *); ++ u64 mask; ++ u32 mult; ++ u32 shift; ++}; ++ ++struct timecounter { ++ const struct cyclecounter *cc; ++ u64 cycle_last; ++ u64 nsec; ++ u64 mask; ++ u64 frac; ++}; ++ ++struct arch_timer_kvm_info { ++ struct timecounter timecounter; ++ int virtual_irq; ++}; ++ ++enum hrtimer_mode { ++ HRTIMER_MODE_ABS = 0, ++ HRTIMER_MODE_REL = 1, ++ HRTIMER_MODE_PINNED = 2, ++ HRTIMER_MODE_SOFT = 4, ++ HRTIMER_MODE_ABS_PINNED = 2, ++ HRTIMER_MODE_REL_PINNED = 3, ++ HRTIMER_MODE_ABS_SOFT = 4, ++ HRTIMER_MODE_REL_SOFT = 5, ++ HRTIMER_MODE_ABS_PINNED_SOFT = 6, ++ HRTIMER_MODE_REL_PINNED_SOFT = 7, ++}; ++ ++struct crypto_async_request; ++ ++typedef void (*crypto_completion_t)(struct crypto_async_request *, int); ++ ++struct crypto_tfm; ++ ++struct crypto_async_request { ++ struct list_head list; ++ crypto_completion_t complete; ++ void *data; ++ struct crypto_tfm *tfm; ++ u32 flags; ++}; ++ ++struct crypto_ablkcipher; ++ ++struct ablkcipher_request; ++ ++struct ablkcipher_tfm { ++ int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int); ++ int (*encrypt)(struct ablkcipher_request *); ++ int (*decrypt)(struct ablkcipher_request *); ++ struct crypto_ablkcipher *base; ++ unsigned int ivsize; ++ unsigned int reqsize; ++}; ++ ++struct blkcipher_desc; ++ ++struct blkcipher_tfm { ++ void *iv; ++ int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int); ++ int (*encrypt)(struct blkcipher_desc *, struct scatterlist 
*, struct scatterlist *, unsigned int); ++ int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); ++}; ++ ++struct cipher_tfm { ++ int (*cit_setkey)(struct crypto_tfm *, const u8 *, unsigned int); ++ void (*cit_encrypt_one)(struct crypto_tfm *, u8 *, const u8 *); ++ void (*cit_decrypt_one)(struct crypto_tfm *, u8 *, const u8 *); ++}; ++ ++struct compress_tfm { ++ int (*cot_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); ++ int (*cot_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); ++}; ++ ++struct crypto_alg; ++ ++struct crypto_tfm { ++ u32 crt_flags; ++ union { ++ struct ablkcipher_tfm ablkcipher; ++ struct blkcipher_tfm blkcipher; ++ struct cipher_tfm cipher; ++ struct compress_tfm compress; ++ } crt_u; ++ void (*exit)(struct crypto_tfm *); ++ struct crypto_alg *__crt_alg; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__crt_ctx[0]; ++}; ++ ++struct ablkcipher_request { ++ struct crypto_async_request base; ++ unsigned int nbytes; ++ void *info; ++ struct scatterlist *src; ++ struct scatterlist *dst; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_blkcipher; ++ ++struct blkcipher_desc { ++ struct crypto_blkcipher *tfm; ++ void *info; ++ u32 flags; ++}; ++ ++struct crypto_blkcipher { ++ struct crypto_tfm base; ++}; ++ ++struct skcipher_givcrypt_request; ++ ++struct ablkcipher_alg { ++ int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int); ++ int (*encrypt)(struct ablkcipher_request *); ++ int (*decrypt)(struct ablkcipher_request *); ++ int (*givencrypt)(struct skcipher_givcrypt_request *); ++ int (*givdecrypt)(struct skcipher_givcrypt_request *); ++ const char *geniv; ++ unsigned int min_keysize; ++ unsigned int max_keysize; ++ unsigned int ivsize; ++}; ++ ++struct crypto_ablkcipher { ++ struct crypto_tfm base; ++}; ++ ++struct blkcipher_alg { ++ int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int); ++ int (*encrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); ++ int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int); ++ const char *geniv; ++ unsigned int min_keysize; ++ unsigned int max_keysize; ++ unsigned int ivsize; ++}; ++ ++struct cipher_alg { ++ unsigned int cia_min_keysize; ++ unsigned int cia_max_keysize; ++ int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); ++ void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); ++ void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); ++}; ++ ++struct compress_alg { ++ int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); ++ int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); ++}; ++ ++struct crypto_type; ++ ++struct crypto_alg { ++ struct list_head cra_list; ++ struct list_head cra_users; ++ u32 cra_flags; ++ unsigned int cra_blocksize; ++ unsigned int cra_ctxsize; ++ unsigned int cra_alignmask; ++ int cra_priority; ++ refcount_t cra_refcnt; ++ char cra_name[128]; ++ char cra_driver_name[128]; ++ const struct crypto_type *cra_type; ++ union { ++ struct ablkcipher_alg ablkcipher; ++ struct blkcipher_alg blkcipher; ++ struct cipher_alg cipher; ++ struct compress_alg compress; ++ } cra_u; ++ int (*cra_init)(struct crypto_tfm *); ++ void (*cra_exit)(struct crypto_tfm *); ++ void (*cra_destroy)(struct 
crypto_alg *); ++ struct module *cra_module; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct sk_buff___2; ++ ++struct crypto_instance; ++ ++struct crypto_type { ++ unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); ++ unsigned int (*extsize)(struct crypto_alg *); ++ int (*init)(struct crypto_tfm *, u32, u32); ++ int (*init_tfm)(struct crypto_tfm *); ++ void (*show)(struct seq_file *, struct crypto_alg *); ++ int (*report)(struct sk_buff___2 *, struct crypto_alg *); ++ void (*free)(struct crypto_instance *); ++ unsigned int type; ++ unsigned int maskclear; ++ unsigned int maskset; ++ unsigned int tfmsize; ++}; ++ ++struct crypto_aes_ctx { ++ u32 key_enc[60]; ++ u32 key_dec[60]; ++ u32 key_length; ++}; ++ ++typedef struct { ++ u32 revision; ++ void *parent_handle; ++ efi_system_table_t *system_table; ++ void *device_handle; ++ void *file_path; ++ void *reserved; ++ u32 load_options_size; ++ void *load_options; ++ void *image_base; ++ __u64 image_size; ++ unsigned int image_code_type; ++ unsigned int image_data_type; ++ long unsigned int unload; ++} efi_loaded_image_t; ++ ++struct efi_simple_text_output_protocol { ++ void *reset; ++ efi_status_t (*output_string)(void *, void *); ++ void *test_string; ++}; ++ ++enum efi_secureboot_mode { ++ efi_secureboot_mode_unset = 0, ++ efi_secureboot_mode_unknown = 1, ++ efi_secureboot_mode_disabled = 2, ++ efi_secureboot_mode_enabled = 3, ++}; ++ ++struct linux_efi_memreserve { ++ int size; ++ atomic_t count; ++ phys_addr_t next; ++ struct { ++ phys_addr_t base; ++ phys_addr_t size; ++ } entry[0]; ++}; ++ ++struct efi_boot_memmap { ++ efi_memory_desc_t **map; ++ long unsigned int *map_size; ++ long unsigned int *desc_size; ++ u32 *desc_ver; ++ long unsigned int *key_ptr; ++ long unsigned int *buff_size; ++}; ++ ++typedef struct { ++ u64 size; ++ u64 file_size; ++ u64 phys_size; ++ efi_time_t create_time; ++ efi_time_t last_access_time; ++ efi_time_t modification_time; ++ __u64 attribute; ++ efi_char16_t filename[1]; ++} efi_file_info_t; ++ ++struct _efi_file_handle { ++ u64 revision; ++ efi_status_t (*open)(struct _efi_file_handle *, struct _efi_file_handle **, efi_char16_t *, u64, u64); ++ efi_status_t (*close)(struct _efi_file_handle *); ++ void *delete; ++ efi_status_t (*read)(struct _efi_file_handle *, long unsigned int *, void *); ++ void *write; ++ void *get_position; ++ void *set_position; ++ efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *, long unsigned int *, void *); ++ void *set_info; ++ void *flush; ++}; ++ ++typedef struct _efi_file_handle efi_file_handle_t; ++ ++struct _efi_file_io_interface { ++ u64 revision; ++ int (*open_volume)(struct _efi_file_io_interface *, efi_file_handle_t **); ++}; ++ ++typedef struct _efi_file_io_interface efi_file_io_interface_t; ++ ++typedef efi_status_t (*efi_exit_boot_map_processing)(efi_system_table_t *, struct efi_boot_memmap *, void *); ++ ++struct file_info { ++ efi_file_handle_t *handle; ++ u64 size; ++}; ++ ++typedef struct { ++ efi_guid_t guid; ++ long unsigned int table; ++} efi_config_table_t; ++ ++struct exit_boot_struct { ++ efi_memory_desc_t *runtime_map; ++ int *runtime_entry_count; ++ void *new_fdt_addr; ++}; ++ ++struct efi_pixel_bitmask { ++ u32 red_mask; ++ u32 green_mask; ++ u32 blue_mask; ++ u32 reserved_mask; ++}; ++ ++struct efi_graphics_output_mode_info { ++ u32 version; ++ u32 horizontal_resolution; ++ u32 vertical_resolution; ++ int 
pixel_format; ++ struct efi_pixel_bitmask pixel_information; ++ u32 pixels_per_scan_line; ++}; ++ ++struct efi_graphics_output_protocol_mode_64 { ++ u32 max_mode; ++ u32 mode; ++ u64 info; ++ u64 size_of_info; ++ u64 frame_buffer_base; ++ u64 frame_buffer_size; ++}; ++ ++struct efi_graphics_output_protocol_64 { ++ u64 query_mode; ++ u64 set_mode; ++ u64 blt; ++ u64 mode; ++}; ++ ++struct fdt_reserve_entry { ++ fdt64_t address; ++ fdt64_t size; ++}; ++ ++struct fdt_node_header { ++ fdt32_t tag; ++ char name[0]; ++}; ++ ++struct fdt_property { ++ fdt32_t tag; ++ fdt32_t len; ++ fdt32_t nameoff; ++ char data[0]; ++}; ++ ++struct linux_efi_random_seed { ++ u32 size; ++ u8 bits[0]; ++}; ++ ++struct efi_rng_protocol { ++ efi_status_t (*get_info)(struct efi_rng_protocol *, long unsigned int *, efi_guid_t *); ++ efi_status_t (*get_rng)(struct efi_rng_protocol *, efi_guid_t *, long unsigned int, u8 *); ++}; ++ ++typedef u32 efi_tcg2_event_log_format; ++ ++typedef struct { ++ void *get_capability; ++ efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); ++ void *hash_log_extend_event; ++ void *submit_command; ++ void *get_active_pcr_banks; ++ void *set_active_pcr_banks; ++ void *get_result_of_set_active_pcr_banks; ++} efi_tcg2_protocol_t; ++ ++struct linux_efi_tpm_eventlog { ++ u32 size; ++ u8 version; ++ u8 log[0]; ++}; ++ ++struct tcpa_event { ++ u32 pcr_index; ++ u32 event_type; ++ u8 pcr_value[20]; ++ u32 event_size; ++ u8 event_data[0]; ++}; ++ ++enum tk_offsets { ++ TK_OFFS_REAL = 0, ++ TK_OFFS_BOOT = 1, ++ TK_OFFS_TAI = 2, ++ TK_OFFS_MAX = 3, ++}; ++ ++struct fdtable { ++ unsigned int max_fds; ++ struct file **fd; ++ long unsigned int *close_on_exec; ++ long unsigned int *open_fds; ++ long unsigned int *full_fds_bits; ++ struct callback_head rcu; ++}; ++ ++struct files_cgroup; ++ ++struct files_struct { ++ atomic_t count; ++ bool resize_in_progress; ++ wait_queue_head_t resize_wait; ++ struct fdtable *fdt; ++ struct fdtable fdtab; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t file_lock; ++ unsigned int next_fd; ++ long unsigned int close_on_exec_init[1]; ++ long unsigned int open_fds_init[1]; ++ long unsigned int full_fds_bits_init[1]; ++ struct file *fd_array[64]; ++ struct files_cgroup *files_cgroup; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct robust_list { ++ struct robust_list *next; ++}; ++ ++struct robust_list_head { ++ struct robust_list list; ++ long int futex_offset; ++ struct robust_list *list_op_pending; ++}; ++ ++struct multiprocess_signals { ++ sigset_t signal; ++ struct hlist_node node; ++}; ++ ++typedef int (*proc_visitor)(struct task_struct *, void *); ++ ++enum { ++ IOPRIO_CLASS_NONE = 0, ++ IOPRIO_CLASS_RT = 1, ++ IOPRIO_CLASS_BE = 2, ++ IOPRIO_CLASS_IDLE = 3, ++}; ++ ++struct trace_event_raw_task_newtask { ++ struct trace_entry ent; ++ pid_t pid; ++ char comm[16]; ++ long unsigned int clone_flags; ++ short int oom_score_adj; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_task_rename { ++ struct trace_entry ent; ++ pid_t pid; ++ char oldcomm[16]; ++ char newcomm[16]; ++ short int oom_score_adj; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_task_newtask {}; ++ ++struct trace_event_data_offsets_task_rename {}; ++ ++enum kmsg_dump_reason { ++ KMSG_DUMP_UNDEF = 0, ++ KMSG_DUMP_PANIC = 1, ++ KMSG_DUMP_OOPS = 2, ++ KMSG_DUMP_EMERG = 3, ++ KMSG_DUMP_RESTART = 4, ++ KMSG_DUMP_HALT = 5, ++ KMSG_DUMP_POWEROFF = 6, ++}; ++ ++struct vt_mode { ++ char mode; ++ 
char waitv; ++ short int relsig; ++ short int acqsig; ++ short int frsig; ++}; ++ ++struct console_font { ++ unsigned int width; ++ unsigned int height; ++ unsigned int charcount; ++ unsigned char *data; ++}; ++ ++struct uni_pagedir; ++ ++struct uni_screen; ++ ++struct vc_data { ++ struct tty_port port; ++ short unsigned int vc_num; ++ unsigned int vc_cols; ++ unsigned int vc_rows; ++ unsigned int vc_size_row; ++ unsigned int vc_scan_lines; ++ long unsigned int vc_origin; ++ long unsigned int vc_scr_end; ++ long unsigned int vc_visible_origin; ++ unsigned int vc_top; ++ unsigned int vc_bottom; ++ const struct consw *vc_sw; ++ short unsigned int *vc_screenbuf; ++ unsigned int vc_screenbuf_size; ++ unsigned char vc_mode; ++ unsigned char vc_attr; ++ unsigned char vc_def_color; ++ unsigned char vc_color; ++ unsigned char vc_s_color; ++ unsigned char vc_ulcolor; ++ unsigned char vc_itcolor; ++ unsigned char vc_halfcolor; ++ unsigned int vc_cursor_type; ++ short unsigned int vc_complement_mask; ++ short unsigned int vc_s_complement_mask; ++ unsigned int vc_x; ++ unsigned int vc_y; ++ unsigned int vc_saved_x; ++ unsigned int vc_saved_y; ++ long unsigned int vc_pos; ++ short unsigned int vc_hi_font_mask; ++ struct console_font vc_font; ++ short unsigned int vc_video_erase_char; ++ unsigned int vc_state; ++ unsigned int vc_npar; ++ unsigned int vc_par[16]; ++ struct vt_mode vt_mode; ++ struct pid *vt_pid; ++ int vt_newvt; ++ wait_queue_head_t paste_wait; ++ unsigned int vc_charset: 1; ++ unsigned int vc_s_charset: 1; ++ unsigned int vc_disp_ctrl: 1; ++ unsigned int vc_toggle_meta: 1; ++ unsigned int vc_decscnm: 1; ++ unsigned int vc_decom: 1; ++ unsigned int vc_decawm: 1; ++ unsigned int vc_deccm: 1; ++ unsigned int vc_decim: 1; ++ unsigned int vc_intensity: 2; ++ unsigned int vc_italic: 1; ++ unsigned int vc_underline: 1; ++ unsigned int vc_blink: 1; ++ unsigned int vc_reverse: 1; ++ unsigned int vc_s_intensity: 2; ++ unsigned int vc_s_italic: 1; ++ unsigned int vc_s_underline: 1; ++ unsigned int vc_s_blink: 1; ++ unsigned int vc_s_reverse: 1; ++ unsigned int vc_ques: 1; ++ unsigned int vc_need_wrap: 1; ++ unsigned int vc_can_do_color: 1; ++ unsigned int vc_report_mouse: 2; ++ unsigned char vc_utf: 1; ++ unsigned char vc_utf_count; ++ int vc_utf_char; ++ unsigned int vc_tab_stop[8]; ++ unsigned char vc_palette[48]; ++ short unsigned int *vc_translate; ++ unsigned char vc_G0_charset; ++ unsigned char vc_G1_charset; ++ unsigned char vc_saved_G0; ++ unsigned char vc_saved_G1; ++ unsigned int vc_resize_user; ++ unsigned int vc_bell_pitch; ++ unsigned int vc_bell_duration; ++ short unsigned int vc_cur_blink_ms; ++ struct vc_data **vc_display_fg; ++ struct uni_pagedir *vc_uni_pagedir; ++ struct uni_pagedir **vc_uni_pagedir_loc; ++ struct uni_screen *vc_uni_screen; ++ bool vc_panic_force_write; ++}; ++ ++struct vc { ++ struct vc_data *d; ++ struct work_struct SAK_work; ++}; ++ ++struct vt_spawn_console { ++ spinlock_t lock; ++ struct pid *pid; ++ int sig; ++}; ++ ++struct warn_args { ++ const char *fmt; ++ va_list args; ++}; ++ ++struct smp_hotplug_thread { ++ struct task_struct **store; ++ struct list_head list; ++ int (*thread_should_run)(unsigned int); ++ void (*thread_fn)(unsigned int); ++ void (*create)(unsigned int); ++ void (*setup)(unsigned int); ++ void (*cleanup)(unsigned int, bool); ++ void (*park)(unsigned int); ++ void (*unpark)(unsigned int); ++ bool selfparking; ++ const char *thread_comm; ++}; ++ ++struct trace_event_raw_cpuhp_enter { ++ struct trace_entry ent; ++ unsigned int cpu; ++ 
int target; ++ int idx; ++ void *fun; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cpuhp_multi_enter { ++ struct trace_entry ent; ++ unsigned int cpu; ++ int target; ++ int idx; ++ void *fun; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cpuhp_exit { ++ struct trace_entry ent; ++ unsigned int cpu; ++ int state; ++ int idx; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_cpuhp_enter {}; ++ ++struct trace_event_data_offsets_cpuhp_multi_enter {}; ++ ++struct trace_event_data_offsets_cpuhp_exit {}; ++ ++struct cpuhp_cpu_state { ++ enum cpuhp_state state; ++ enum cpuhp_state target; ++ enum cpuhp_state fail; ++ struct task_struct *thread; ++ bool should_run; ++ bool rollback; ++ bool single; ++ bool bringup; ++ bool booted_once; ++ struct hlist_node *node; ++ struct hlist_node *last; ++ enum cpuhp_state cb_state; ++ int result; ++ struct completion done_up; ++ struct completion done_down; ++}; ++ ++struct cpuhp_step { ++ const char *name; ++ union { ++ int (*single)(unsigned int); ++ int (*multi)(unsigned int, struct hlist_node *); ++ } startup; ++ union { ++ int (*single)(unsigned int); ++ int (*multi)(unsigned int, struct hlist_node *); ++ } teardown; ++ struct hlist_head list; ++ bool cant_stop; ++ bool multi_instance; ++}; ++ ++enum cpu_mitigations { ++ CPU_MITIGATIONS_OFF = 0, ++ CPU_MITIGATIONS_AUTO = 1, ++ CPU_MITIGATIONS_AUTO_NOSMT = 2, ++}; ++ ++struct rusage { ++ struct timeval ru_utime; ++ struct timeval ru_stime; ++ __kernel_long_t ru_maxrss; ++ __kernel_long_t ru_ixrss; ++ __kernel_long_t ru_idrss; ++ __kernel_long_t ru_isrss; ++ __kernel_long_t ru_minflt; ++ __kernel_long_t ru_majflt; ++ __kernel_long_t ru_nswap; ++ __kernel_long_t ru_inblock; ++ __kernel_long_t ru_oublock; ++ __kernel_long_t ru_msgsnd; ++ __kernel_long_t ru_msgrcv; ++ __kernel_long_t ru_nsignals; ++ __kernel_long_t ru_nvcsw; ++ __kernel_long_t ru_nivcsw; ++}; ++ ++typedef u32 compat_uint_t; ++ ++struct compat_rusage { ++ struct compat_timeval ru_utime; ++ struct compat_timeval ru_stime; ++ compat_long_t ru_maxrss; ++ compat_long_t ru_ixrss; ++ compat_long_t ru_idrss; ++ compat_long_t ru_isrss; ++ compat_long_t ru_minflt; ++ compat_long_t ru_majflt; ++ compat_long_t ru_nswap; ++ compat_long_t ru_inblock; ++ compat_long_t ru_oublock; ++ compat_long_t ru_msgsnd; ++ compat_long_t ru_msgrcv; ++ compat_long_t ru_nsignals; ++ compat_long_t ru_nvcsw; ++ compat_long_t ru_nivcsw; ++}; ++ ++struct waitid_info { ++ pid_t pid; ++ uid_t uid; ++ int status; ++ int cause; ++}; ++ ++struct wait_opts { ++ enum pid_type wo_type; ++ int wo_flags; ++ struct pid *wo_pid; ++ struct waitid_info *wo_info; ++ int wo_stat; ++ struct rusage *wo_rusage; ++ wait_queue_entry_t child_wait; ++ int notask_error; ++}; ++ ++struct softirq_action { ++ void (*action)(struct softirq_action *); ++}; ++ ++struct tasklet_struct { ++ struct tasklet_struct *next; ++ long unsigned int state; ++ atomic_t count; ++ void (*func)(long unsigned int); ++ long unsigned int data; ++}; ++ ++enum { ++ TASKLET_STATE_SCHED = 0, ++ TASKLET_STATE_RUN = 1, ++}; ++ ++struct tasklet_hrtimer { ++ struct hrtimer timer; ++ struct tasklet_struct tasklet; ++ enum hrtimer_restart (*function)(struct hrtimer *); ++}; ++ ++struct trace_event_raw_irq_handler_entry { ++ struct trace_entry ent; ++ int irq; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_irq_handler_exit { ++ struct trace_entry ent; ++ int irq; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_softirq { ++ struct trace_entry ent; ++ unsigned 
int vec; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_irq_handler_entry { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_irq_handler_exit {}; ++ ++struct trace_event_data_offsets_softirq {}; ++ ++struct tasklet_head { ++ struct tasklet_struct *head; ++ struct tasklet_struct **tail; ++}; ++ ++enum { ++ IORES_DESC_NONE = 0, ++ IORES_DESC_CRASH_KERNEL = 1, ++ IORES_DESC_ACPI_TABLES = 2, ++ IORES_DESC_ACPI_NV_STORAGE = 3, ++ IORES_DESC_PERSISTENT_MEMORY = 4, ++ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, ++ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, ++ IORES_DESC_DEVICE_PUBLIC_MEMORY = 7, ++}; ++ ++typedef void (*dr_release_t)(struct device *, void *); ++ ++enum { ++ REGION_INTERSECTS = 0, ++ REGION_DISJOINT = 1, ++ REGION_MIXED = 2, ++}; ++ ++struct resource_constraint { ++ resource_size_t min; ++ resource_size_t max; ++ resource_size_t align; ++ resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); ++ void *alignf_data; ++}; ++ ++enum { ++ MAX_IORES_LEVEL = 5, ++}; ++ ++struct region_devres { ++ struct resource *parent; ++ resource_size_t start; ++ resource_size_t n; ++}; ++ ++enum sysctl_writes_mode { ++ SYSCTL_WRITES_LEGACY = 4294967295, ++ SYSCTL_WRITES_WARN = 0, ++ SYSCTL_WRITES_STRICT = 1, ++}; ++ ++struct do_proc_dointvec_minmax_conv_param { ++ int *min; ++ int *max; ++}; ++ ++struct do_proc_douintvec_minmax_conv_param { ++ unsigned int *min; ++ unsigned int *max; ++}; ++ ++struct __sysctl_args { ++ int *name; ++ int nlen; ++ void *oldval; ++ size_t *oldlenp; ++ void *newval; ++ size_t newlen; ++ long unsigned int __unused[4]; ++}; ++ ++enum { ++ CTL_KERN = 1, ++ CTL_VM = 2, ++ CTL_NET = 3, ++ CTL_PROC = 4, ++ CTL_FS = 5, ++ CTL_DEBUG = 6, ++ CTL_DEV = 7, ++ CTL_BUS = 8, ++ CTL_ABI = 9, ++ CTL_CPU = 10, ++ CTL_ARLAN = 254, ++ CTL_S390DBF = 5677, ++ CTL_SUNRPC = 7249, ++ CTL_PM = 9899, ++ CTL_FRV = 9898, ++}; ++ ++enum { ++ KERN_OSTYPE = 1, ++ KERN_OSRELEASE = 2, ++ KERN_OSREV = 3, ++ KERN_VERSION = 4, ++ KERN_SECUREMASK = 5, ++ KERN_PROF = 6, ++ KERN_NODENAME = 7, ++ KERN_DOMAINNAME = 8, ++ KERN_PANIC = 15, ++ KERN_REALROOTDEV = 16, ++ KERN_SPARC_REBOOT = 21, ++ KERN_CTLALTDEL = 22, ++ KERN_PRINTK = 23, ++ KERN_NAMETRANS = 24, ++ KERN_PPC_HTABRECLAIM = 25, ++ KERN_PPC_ZEROPAGED = 26, ++ KERN_PPC_POWERSAVE_NAP = 27, ++ KERN_MODPROBE = 28, ++ KERN_SG_BIG_BUFF = 29, ++ KERN_ACCT = 30, ++ KERN_PPC_L2CR = 31, ++ KERN_RTSIGNR = 32, ++ KERN_RTSIGMAX = 33, ++ KERN_SHMMAX = 34, ++ KERN_MSGMAX = 35, ++ KERN_MSGMNB = 36, ++ KERN_MSGPOOL = 37, ++ KERN_SYSRQ = 38, ++ KERN_MAX_THREADS = 39, ++ KERN_RANDOM = 40, ++ KERN_SHMALL = 41, ++ KERN_MSGMNI = 42, ++ KERN_SEM = 43, ++ KERN_SPARC_STOP_A = 44, ++ KERN_SHMMNI = 45, ++ KERN_OVERFLOWUID = 46, ++ KERN_OVERFLOWGID = 47, ++ KERN_SHMPATH = 48, ++ KERN_HOTPLUG = 49, ++ KERN_IEEE_EMULATION_WARNINGS = 50, ++ KERN_S390_USER_DEBUG_LOGGING = 51, ++ KERN_CORE_USES_PID = 52, ++ KERN_TAINTED = 53, ++ KERN_CADPID = 54, ++ KERN_PIDMAX = 55, ++ KERN_CORE_PATTERN = 56, ++ KERN_PANIC_ON_OOPS = 57, ++ KERN_HPPA_PWRSW = 58, ++ KERN_HPPA_UNALIGNED = 59, ++ KERN_PRINTK_RATELIMIT = 60, ++ KERN_PRINTK_RATELIMIT_BURST = 61, ++ KERN_PTY = 62, ++ KERN_NGROUPS_MAX = 63, ++ KERN_SPARC_SCONS_PWROFF = 64, ++ KERN_HZ_TIMER = 65, ++ KERN_UNKNOWN_NMI_PANIC = 66, ++ KERN_BOOTLOADER_TYPE = 67, ++ KERN_RANDOMIZE = 68, ++ KERN_SETUID_DUMPABLE = 69, ++ KERN_SPIN_RETRY = 70, ++ KERN_ACPI_VIDEO_FLAGS = 71, ++ KERN_IA64_UNALIGNED = 72, ++ KERN_COMPAT_LOG = 73, ++ KERN_MAX_LOCK_DEPTH = 74, ++ KERN_NMI_WATCHDOG = 75, ++ KERN_PANIC_ON_NMI 
= 76, ++ KERN_PANIC_ON_WARN = 77, ++ KERN_PANIC_PRINT = 78, ++}; ++ ++struct xfs_sysctl_val { ++ int min; ++ int val; ++ int max; ++}; ++ ++typedef struct xfs_sysctl_val xfs_sysctl_val_t; ++ ++struct xfs_param { ++ xfs_sysctl_val_t sgid_inherit; ++ xfs_sysctl_val_t symlink_mode; ++ xfs_sysctl_val_t panic_mask; ++ xfs_sysctl_val_t error_level; ++ xfs_sysctl_val_t syncd_timer; ++ xfs_sysctl_val_t stats_clear; ++ xfs_sysctl_val_t inherit_sync; ++ xfs_sysctl_val_t inherit_nodump; ++ xfs_sysctl_val_t inherit_noatim; ++ xfs_sysctl_val_t xfs_buf_timer; ++ xfs_sysctl_val_t xfs_buf_age; ++ xfs_sysctl_val_t inherit_nosym; ++ xfs_sysctl_val_t rotorstep; ++ xfs_sysctl_val_t inherit_nodfrg; ++ xfs_sysctl_val_t fstrm_timer; ++ xfs_sysctl_val_t eofb_timer; ++ xfs_sysctl_val_t cowb_timer; ++}; ++ ++typedef struct xfs_param xfs_param_t; ++ ++struct xfs_globals { ++ int log_recovery_delay; ++ int mount_delay; ++ bool bug_on_assert; ++}; ++ ++enum { ++ NAPI_STATE_SCHED = 0, ++ NAPI_STATE_MISSED = 1, ++ NAPI_STATE_DISABLE = 2, ++ NAPI_STATE_NPSVC = 3, ++ NAPI_STATE_HASHED = 4, ++ NAPI_STATE_NO_BUSY_POLL = 5, ++ NAPI_STATE_IN_BUSY_POLL = 6, ++}; ++ ++struct compat_sysctl_args { ++ compat_uptr_t name; ++ int nlen; ++ compat_uptr_t oldval; ++ compat_uptr_t oldlenp; ++ compat_uptr_t newval; ++ compat_size_t newlen; ++ compat_ulong_t __unused[4]; ++}; ++ ++struct __user_cap_header_struct { ++ __u32 version; ++ int pid; ++}; ++ ++typedef struct __user_cap_header_struct *cap_user_header_t; ++ ++struct __user_cap_data_struct { ++ __u32 effective; ++ __u32 permitted; ++ __u32 inheritable; ++}; ++ ++typedef struct __user_cap_data_struct *cap_user_data_t; ++ ++struct sigqueue { ++ struct list_head list; ++ int flags; ++ siginfo_t info; ++ struct user_struct *user; ++}; ++ ++struct ptrace_peeksiginfo_args { ++ __u64 off; ++ __u32 flags; ++ __s32 nr; ++}; ++ ++struct compat_iovec { ++ compat_uptr_t iov_base; ++ compat_size_t iov_len; ++}; ++ ++struct seq_operations___2 { ++ void * (*start)(struct seq_file___2 *, loff_t *); ++ void (*stop)(struct seq_file___2 *, void *); ++ void * (*next)(struct seq_file___2 *, void *, loff_t *); ++ int (*show)(struct seq_file___2 *, void *); ++}; ++ ++typedef int __kernel_mqd_t; ++ ++typedef __kernel_mqd_t mqd_t; ++ ++typedef long unsigned int old_sigset_t; ++ ++enum audit_state { ++ AUDIT_DISABLED = 0, ++ AUDIT_BUILD_CONTEXT = 1, ++ AUDIT_RECORD_CONTEXT = 2, ++}; ++ ++struct audit_cap_data { ++ kernel_cap_t permitted; ++ kernel_cap_t inheritable; ++ union { ++ unsigned int fE; ++ kernel_cap_t effective; ++ }; ++ kernel_cap_t ambient; ++}; ++ ++struct filename; ++ ++struct audit_names { ++ struct list_head list; ++ struct filename *name; ++ int name_len; ++ bool hidden; ++ long unsigned int ino; ++ dev_t dev; ++ umode_t mode; ++ kuid_t uid; ++ kgid_t gid; ++ dev_t rdev; ++ u32 osid; ++ struct audit_cap_data fcap; ++ unsigned int fcap_ver; ++ unsigned char type; ++ bool should_free; ++}; ++ ++struct mq_attr { ++ __kernel_long_t mq_flags; ++ __kernel_long_t mq_maxmsg; ++ __kernel_long_t mq_msgsize; ++ __kernel_long_t mq_curmsgs; ++ __kernel_long_t __reserved[4]; ++}; ++ ++struct audit_proctitle { ++ int len; ++ char *value; ++}; ++ ++struct audit_aux_data; ++ ++struct audit_tree_refs; ++ ++struct audit_context { ++ int dummy; ++ int in_syscall; ++ enum audit_state state; ++ enum audit_state current_state; ++ unsigned int serial; ++ int major; ++ struct timespec64 ctime; ++ long unsigned int argv[4]; ++ long int return_code; ++ u64 prio; ++ int return_valid; ++ struct audit_names 
preallocated_names[5]; ++ int name_count; ++ struct list_head names_list; ++ char *filterkey; ++ struct path pwd; ++ struct audit_aux_data *aux; ++ struct audit_aux_data *aux_pids; ++ struct __kernel_sockaddr_storage *sockaddr; ++ size_t sockaddr_len; ++ pid_t pid; ++ pid_t ppid; ++ kuid_t uid; ++ kuid_t euid; ++ kuid_t suid; ++ kuid_t fsuid; ++ kgid_t gid; ++ kgid_t egid; ++ kgid_t sgid; ++ kgid_t fsgid; ++ long unsigned int personality; ++ int arch; ++ pid_t target_pid; ++ kuid_t target_auid; ++ kuid_t target_uid; ++ unsigned int target_sessionid; ++ u32 target_sid; ++ char target_comm[16]; ++ struct audit_tree_refs *trees; ++ struct audit_tree_refs *first_trees; ++ struct list_head killed_trees; ++ int tree_count; ++ int type; ++ union { ++ struct { ++ int nargs; ++ long int args[6]; ++ } socketcall; ++ struct { ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++ u32 osid; ++ int has_perm; ++ uid_t perm_uid; ++ gid_t perm_gid; ++ umode_t perm_mode; ++ long unsigned int qbytes; ++ } ipc; ++ struct { ++ mqd_t mqdes; ++ struct mq_attr mqstat; ++ } mq_getsetattr; ++ struct { ++ mqd_t mqdes; ++ int sigev_signo; ++ } mq_notify; ++ struct { ++ mqd_t mqdes; ++ size_t msg_len; ++ unsigned int msg_prio; ++ struct timespec64 abs_timeout; ++ } mq_sendrecv; ++ struct { ++ int oflag; ++ umode_t mode; ++ struct mq_attr attr; ++ } mq_open; ++ struct { ++ pid_t pid; ++ struct audit_cap_data cap; ++ } capset; ++ struct { ++ int fd; ++ int flags; ++ } mmap; ++ struct { ++ int argc; ++ } execve; ++ struct { ++ char *name; ++ } module; ++ }; ++ int fds[2]; ++ struct audit_proctitle proctitle; ++}; ++ ++typedef u32 compat_old_sigset_t; ++ ++struct filename { ++ const char *name; ++ const char *uptr; ++ int refcnt; ++ struct audit_names *aname; ++ const char iname[0]; ++}; ++ ++struct compat_sigaction { ++ compat_uptr_t sa_handler; ++ compat_ulong_t sa_flags; ++ compat_uptr_t sa_restorer; ++ compat_sigset_t sa_mask; ++}; ++ ++struct compat_old_sigaction { ++ compat_uptr_t sa_handler; ++ compat_old_sigset_t sa_mask; ++ compat_ulong_t sa_flags; ++ compat_uptr_t sa_restorer; ++}; ++ ++enum { ++ TRACE_SIGNAL_DELIVERED = 0, ++ TRACE_SIGNAL_IGNORED = 1, ++ TRACE_SIGNAL_ALREADY_PENDING = 2, ++ TRACE_SIGNAL_OVERFLOW_FAIL = 3, ++ TRACE_SIGNAL_LOSE_INFO = 4, ++}; ++ ++struct trace_event_raw_signal_generate { ++ struct trace_entry ent; ++ int sig; ++ int errno; ++ int code; ++ char comm[16]; ++ pid_t pid; ++ int group; ++ int result; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_signal_deliver { ++ struct trace_entry ent; ++ int sig; ++ int errno; ++ int code; ++ long unsigned int sa_handler; ++ long unsigned int sa_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_signal_generate {}; ++ ++struct trace_event_data_offsets_signal_deliver {}; ++ ++typedef int (*get_char_func)(); ++ ++typedef __kernel_clock_t clock_t; ++ ++struct rlimit64 { ++ __u64 rlim_cur; ++ __u64 rlim_max; ++}; ++ ++enum uts_proc { ++ UTS_PROC_OSTYPE = 0, ++ UTS_PROC_OSRELEASE = 1, ++ UTS_PROC_VERSION = 2, ++ UTS_PROC_HOSTNAME = 3, ++ UTS_PROC_DOMAINNAME = 4, ++}; ++ ++struct prctl_mm_map { ++ __u64 start_code; ++ __u64 end_code; ++ __u64 start_data; ++ __u64 end_data; ++ __u64 start_brk; ++ __u64 brk; ++ __u64 start_stack; ++ __u64 arg_start; ++ __u64 arg_end; ++ __u64 env_start; ++ __u64 env_end; ++ __u64 *auxv; ++ __u32 auxv_size; ++ __u32 exe_fd; ++}; ++ ++struct compat_tms { ++ compat_clock_t tms_utime; ++ compat_clock_t tms_stime; ++ compat_clock_t tms_cutime; ++ compat_clock_t tms_cstime; ++}; ++ ++struct compat_rlimit { ++ 
compat_ulong_t rlim_cur; ++ compat_ulong_t rlim_max; ++}; ++ ++struct tms { ++ __kernel_clock_t tms_utime; ++ __kernel_clock_t tms_stime; ++ __kernel_clock_t tms_cutime; ++ __kernel_clock_t tms_cstime; ++}; ++ ++struct getcpu_cache { ++ long unsigned int blob[16]; ++}; ++ ++struct compat_sysinfo { ++ s32 uptime; ++ u32 loads[3]; ++ u32 totalram; ++ u32 freeram; ++ u32 sharedram; ++ u32 bufferram; ++ u32 totalswap; ++ u32 freeswap; ++ u16 procs; ++ u16 pad; ++ u32 totalhigh; ++ u32 freehigh; ++ u32 mem_unit; ++ char _f[8]; ++}; ++ ++struct umh_info { ++ const char *cmdline; ++ struct file *pipe_to_umh; ++ struct file *pipe_from_umh; ++ pid_t pid; ++}; ++ ++struct wq_flusher; ++ ++struct worker; ++ ++struct workqueue_attrs; ++ ++struct pool_workqueue; ++ ++struct wq_device; ++ ++struct workqueue_struct { ++ struct list_head pwqs; ++ struct list_head list; ++ struct mutex mutex; ++ int work_color; ++ int flush_color; ++ atomic_t nr_pwqs_to_flush; ++ struct wq_flusher *first_flusher; ++ struct list_head flusher_queue; ++ struct list_head flusher_overflow; ++ struct list_head maydays; ++ struct worker *rescuer; ++ int nr_drainers; ++ int saved_max_active; ++ struct workqueue_attrs *unbound_attrs; ++ struct pool_workqueue *dfl_pwq; ++ struct wq_device *wq_dev; ++ char name[24]; ++ struct callback_head rcu; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ unsigned int flags; ++ struct pool_workqueue *cpu_pwqs; ++ struct pool_workqueue *numa_pwq_tbl[0]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct workqueue_attrs { ++ int nice; ++ cpumask_var_t cpumask; ++ bool no_numa; ++}; ++ ++struct execute_work { ++ struct work_struct work; ++}; ++ ++enum { ++ WQ_UNBOUND = 2, ++ WQ_FREEZABLE = 4, ++ WQ_MEM_RECLAIM = 8, ++ WQ_HIGHPRI = 16, ++ WQ_CPU_INTENSIVE = 32, ++ WQ_SYSFS = 64, ++ WQ_POWER_EFFICIENT = 128, ++ __WQ_DRAINING = 65536, ++ __WQ_ORDERED = 131072, ++ __WQ_LEGACY = 262144, ++ __WQ_ORDERED_EXPLICIT = 524288, ++ __WQ_DYNAMIC = 33554432, ++ WQ_MAX_ACTIVE = 512, ++ WQ_MAX_UNBOUND_PER_CPU = 4, ++ WQ_DFL_ACTIVE = 256, ++}; ++ ++enum hk_flags { ++ HK_FLAG_TIMER = 1, ++ HK_FLAG_RCU = 2, ++ HK_FLAG_MISC = 4, ++ HK_FLAG_SCHED = 8, ++ HK_FLAG_TICK = 16, ++ HK_FLAG_DOMAIN = 32, ++ HK_FLAG_WQ = 64, ++}; ++ ++struct worker_pool; ++ ++struct worker { ++ union { ++ struct list_head entry; ++ struct hlist_node hentry; ++ }; ++ struct work_struct *current_work; ++ work_func_t current_func; ++ struct pool_workqueue *current_pwq; ++ struct list_head scheduled; ++ struct task_struct *task; ++ struct worker_pool *pool; ++ struct list_head node; ++ long unsigned int last_active; ++ unsigned int flags; ++ int id; ++ char desc[24]; ++ struct workqueue_struct *rescue_wq; ++}; ++ ++struct pool_workqueue { ++ struct worker_pool *pool; ++ struct workqueue_struct *wq; ++ int work_color; ++ int flush_color; ++ int refcnt; ++ int nr_in_flight[15]; ++ int nr_active; ++ int max_active; ++ struct list_head delayed_works; ++ struct list_head pwqs_node; ++ struct list_head mayday_node; ++ struct work_struct unbound_release_work; ++ struct callback_head rcu; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct worker_pool { ++ spinlock_t lock; ++ int cpu; ++ int node; ++ int id; ++ unsigned int flags; ++ long unsigned int watchdog_ts; ++ struct list_head worklist; ++ int nr_workers; ++ int nr_idle; ++ struct list_head idle_list; ++ struct timer_list idle_timer; ++ struct timer_list mayday_timer; ++ struct hlist_head busy_hash[64]; ++ struct worker 
*manager; ++ struct list_head workers; ++ struct completion *detach_completion; ++ struct ida worker_ida; ++ struct workqueue_attrs *attrs; ++ struct hlist_node hash_node; ++ int refcnt; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ atomic_t nr_running; ++ struct callback_head rcu; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++enum { ++ POOL_MANAGER_ACTIVE = 1, ++ POOL_DISASSOCIATED = 4, ++ WORKER_DIE = 2, ++ WORKER_IDLE = 4, ++ WORKER_PREP = 8, ++ WORKER_CPU_INTENSIVE = 64, ++ WORKER_UNBOUND = 128, ++ WORKER_REBOUND = 256, ++ WORKER_NICED = 512, ++ WORKER_NOT_RUNNING = 456, ++ NR_STD_WORKER_POOLS = 2, ++ UNBOUND_POOL_HASH_ORDER = 6, ++ BUSY_WORKER_HASH_ORDER = 6, ++ MAX_IDLE_WORKERS_RATIO = 4, ++ IDLE_WORKER_TIMEOUT = 75000, ++ MAYDAY_INITIAL_TIMEOUT = 2, ++ MAYDAY_INTERVAL = 25, ++ CREATE_COOLDOWN = 250, ++ RESCUER_NICE_LEVEL = 4294967276, ++ HIGHPRI_NICE_LEVEL = 4294967276, ++ WQ_NAME_LEN = 24, ++}; ++ ++struct wq_flusher { ++ struct list_head list; ++ int flush_color; ++ struct completion done; ++}; ++ ++struct wq_device { ++ struct workqueue_struct *wq; ++ struct device dev; ++}; ++ ++struct trace_event_raw_workqueue_work { ++ struct trace_entry ent; ++ void *work; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_workqueue_queue_work { ++ struct trace_entry ent; ++ void *work; ++ void *function; ++ void *workqueue; ++ unsigned int req_cpu; ++ unsigned int cpu; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_workqueue_execute_start { ++ struct trace_entry ent; ++ void *work; ++ void *function; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_workqueue_work {}; ++ ++struct trace_event_data_offsets_workqueue_queue_work {}; ++ ++struct trace_event_data_offsets_workqueue_execute_start {}; ++ ++struct wq_barrier { ++ struct work_struct work; ++ struct completion done; ++ struct task_struct *task; ++}; ++ ++struct nice_work { ++ struct work_struct work; ++ long int nice; ++}; ++ ++struct cwt_wait { ++ wait_queue_entry_t wait; ++ struct work_struct *work; ++}; ++ ++struct apply_wqattrs_ctx { ++ struct workqueue_struct *wq; ++ struct workqueue_attrs *attrs; ++ struct list_head list; ++ struct pool_workqueue *dfl_pwq; ++ struct pool_workqueue *pwq_tbl[0]; ++}; ++ ++struct work_for_cpu { ++ struct work_struct work; ++ long int (*fn)(void *); ++ void *arg; ++ long int ret; ++}; ++ ++struct ctl_path { ++ const char *procname; ++}; ++ ++typedef void (*task_work_func_t)(struct callback_head *); ++ ++enum { ++ KERNEL_PARAM_OPS_FL_NOARG = 1, ++}; ++ ++enum { ++ KERNEL_PARAM_FL_UNSAFE = 1, ++ KERNEL_PARAM_FL_HWPARAM = 2, ++}; ++ ++struct param_attribute { ++ struct module_attribute mattr; ++ const struct kernel_param *param; ++}; ++ ++struct module_param_attrs { ++ unsigned int num; ++ struct attribute_group grp; ++ struct param_attribute attrs[0]; ++}; ++ ++struct module_version_attribute { ++ struct module_attribute mattr; ++ const char *module_name; ++ const char *version; ++}; ++ ++struct kmalloced_param { ++ struct list_head list; ++ char val[0]; ++}; ++ ++struct sched_param { ++ int sched_priority; ++}; ++ ++enum { ++ CSS_NO_REF = 1, ++ CSS_ONLINE = 2, ++ CSS_RELEASED = 4, ++ CSS_VISIBLE = 8, ++ CSS_DYING = 16, ++}; ++ ++struct kthread_work; ++ ++typedef void (*kthread_work_func_t)(struct kthread_work *); ++ ++struct kthread_worker; ++ ++struct kthread_work { ++ struct list_head node; ++ kthread_work_func_t func; ++ struct kthread_worker *worker; ++ int canceling; ++}; ++ ++enum { ++ KTW_FREEZABLE = 1, ++}; ++ ++struct kthread_worker { ++ 
unsigned int flags; ++ spinlock_t lock; ++ struct list_head work_list; ++ struct list_head delayed_work_list; ++ struct task_struct *task; ++ struct kthread_work *current_work; ++}; ++ ++struct kthread_delayed_work { ++ struct kthread_work work; ++ struct timer_list timer; ++}; ++ ++struct kthread_create_info { ++ int (*threadfn)(void *); ++ void *data; ++ int node; ++ struct task_struct *result; ++ struct completion *done; ++ struct list_head list; ++}; ++ ++struct kthread { ++ long unsigned int flags; ++ unsigned int cpu; ++ void *data; ++ struct completion parked; ++ struct completion exited; ++ struct cgroup_subsys_state *blkcg_css; ++}; ++ ++enum KTHREAD_BITS { ++ KTHREAD_IS_PER_CPU = 0, ++ KTHREAD_SHOULD_STOP = 1, ++ KTHREAD_SHOULD_PARK = 2, ++}; ++ ++struct kthread_flush_work { ++ struct kthread_work work; ++ struct completion done; ++}; ++ ++struct ipc_ids { ++ int in_use; ++ short unsigned int seq; ++ struct rw_semaphore rwsem; ++ struct idr ipcs_idr; ++ int max_idx; ++ int next_id; ++ struct rhashtable key_ht; ++}; ++ ++struct ipc_namespace { ++ refcount_t count; ++ struct ipc_ids ids[3]; ++ int sem_ctls[4]; ++ int used_sems; ++ unsigned int msg_ctlmax; ++ unsigned int msg_ctlmnb; ++ unsigned int msg_ctlmni; ++ atomic_t msg_bytes; ++ atomic_t msg_hdrs; ++ size_t shm_ctlmax; ++ size_t shm_ctlall; ++ long unsigned int shm_tot; ++ int shm_ctlmni; ++ int shm_rmid_forced; ++ struct notifier_block ipcns_nb; ++ struct vfsmount *mq_mnt; ++ unsigned int mq_queues_count; ++ unsigned int mq_queues_max; ++ unsigned int mq_msg_max; ++ unsigned int mq_msgsize_max; ++ unsigned int mq_msg_default; ++ unsigned int mq_msgsize_default; ++ struct user_namespace *user_ns; ++ struct ucounts *ucounts; ++ struct ns_common ns; ++}; ++ ++struct raw_notifier_head { ++ struct notifier_block *head; ++}; ++ ++struct srcu_notifier_head { ++ struct mutex mutex; ++ struct srcu_struct srcu; ++ struct notifier_block *head; ++}; ++ ++enum what { ++ PROC_EVENT_NONE = 0, ++ PROC_EVENT_FORK = 1, ++ PROC_EVENT_EXEC = 2, ++ PROC_EVENT_UID = 4, ++ PROC_EVENT_GID = 64, ++ PROC_EVENT_SID = 128, ++ PROC_EVENT_PTRACE = 256, ++ PROC_EVENT_COMM = 512, ++ PROC_EVENT_COREDUMP = 1073741824, ++ PROC_EVENT_EXIT = 2147483648, ++}; ++ ++typedef u64 async_cookie_t; ++ ++typedef void (*async_func_t)(void *, async_cookie_t); ++ ++struct async_domain { ++ struct list_head pending; ++ unsigned int registered: 1; ++}; ++ ++struct async_entry { ++ struct list_head domain_list; ++ struct list_head global_list; ++ struct work_struct work; ++ async_cookie_t cookie; ++ async_func_t func; ++ void *data; ++ struct async_domain *domain; ++}; ++ ++struct range { ++ u64 start; ++ u64 end; ++}; ++ ++struct smpboot_thread_data { ++ unsigned int cpu; ++ unsigned int status; ++ struct smp_hotplug_thread *ht; ++}; ++ ++enum { ++ HP_THREAD_NONE = 0, ++ HP_THREAD_ACTIVE = 1, ++ HP_THREAD_PARKED = 2, ++}; ++ ++typedef void (*rcu_callback_t)(struct callback_head *); ++ ++typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); ++ ++struct pin_cookie {}; ++ ++struct cfs_rq { ++ struct load_weight load; ++ long unsigned int runnable_weight; ++ unsigned int nr_running; ++ unsigned int h_nr_running; ++ u64 exec_clock; ++ u64 min_vruntime; ++ struct rb_root_cached tasks_timeline; ++ struct sched_entity *curr; ++ struct sched_entity *next; ++ struct sched_entity *last; ++ struct sched_entity *skip; ++ unsigned int nr_spread_over; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sched_avg avg; ++ struct { ++ raw_spinlock_t lock; ++ int 
nr; ++ long unsigned int load_avg; ++ long unsigned int util_avg; ++ long unsigned int runnable_sum; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ } removed; ++ long unsigned int tg_load_avg_contrib; ++ long int propagate; ++ long int prop_runnable_sum; ++ long unsigned int h_load; ++ u64 last_h_load_update; ++ struct sched_entity *h_load_next; ++ struct rq *rq; ++ int on_list; ++ struct list_head leaf_cfs_rq_list; ++ struct task_group *tg; ++ int runtime_enabled; ++ s64 runtime_remaining; ++ u64 throttled_clock; ++ u64 throttled_clock_task; ++ u64 throttled_clock_task_time; ++ int throttled; ++ int throttle_count; ++ struct list_head throttled_list; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rt_prio_array { ++ long unsigned int bitmap[2]; ++ struct list_head queue[100]; ++}; ++ ++struct rt_rq { ++ struct rt_prio_array active; ++ unsigned int rt_nr_running; ++ unsigned int rr_nr_running; ++ struct { ++ int curr; ++ int next; ++ } highest_prio; ++ long unsigned int rt_nr_migratory; ++ long unsigned int rt_nr_total; ++ int overloaded; ++ struct plist_head pushable_tasks; ++ int rt_queued; ++ int rt_throttled; ++ u64 rt_time; ++ u64 rt_runtime; ++ raw_spinlock_t rt_runtime_lock; ++ long unsigned int rt_nr_boosted; ++ struct rq *rq; ++ struct task_group *tg; ++}; ++ ++struct rt_bandwidth { ++ raw_spinlock_t rt_runtime_lock; ++ ktime_t rt_period; ++ u64 rt_runtime; ++ struct hrtimer rt_period_timer; ++ unsigned int rt_period_active; ++}; ++ ++struct cfs_bandwidth { ++ raw_spinlock_t lock; ++ ktime_t period; ++ u64 quota; ++ u64 runtime; ++ s64 hierarchical_quota; ++ short int idle; ++ short int period_active; ++ struct hrtimer period_timer; ++ struct hrtimer slack_timer; ++ struct list_head throttled_cfs_rq; ++ int nr_periods; ++ int nr_throttled; ++ u64 throttled_time; ++ bool distribute_running; ++}; ++ ++struct task_group { ++ struct cgroup_subsys_state css; ++ struct sched_entity **se; ++ struct cfs_rq **cfs_rq; ++ long unsigned int shares; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ atomic_long_t load_avg; ++ struct sched_rt_entity **rt_se; ++ struct rt_rq **rt_rq; ++ struct rt_bandwidth rt_bandwidth; ++ struct callback_head rcu; ++ struct list_head list; ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++ struct autogroup *autogroup; ++ struct cfs_bandwidth cfs_bandwidth; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++ long: 64; ++}; ++ ++struct update_util_data { ++ void (*func)(struct update_util_data *, u64, unsigned int); ++}; ++ ++struct autogroup { ++ struct kref kref; ++ struct task_group *tg; ++ struct rw_semaphore lock; ++ long unsigned int id; ++ int nice; ++}; ++ ++enum { ++ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, ++ MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, ++ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, ++ MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, ++ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, ++ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, ++}; ++ ++struct sched_domain_shared { ++ atomic_t ref; ++ atomic_t nr_busy_cpus; ++ int has_idle_cores; ++}; ++ ++struct sched_group; ++ ++struct sched_domain { ++ struct sched_domain *parent; ++ struct sched_domain *child; ++ struct sched_group *groups; ++ long unsigned int min_interval; ++ long unsigned int max_interval; ++ unsigned int busy_factor; ++ unsigned int imbalance_pct; ++ unsigned 
int cache_nice_tries; ++ unsigned int busy_idx; ++ unsigned int idle_idx; ++ unsigned int newidle_idx; ++ unsigned int wake_idx; ++ unsigned int forkexec_idx; ++ unsigned int smt_gain; ++ int nohz_idle; ++ int flags; ++ int level; ++ long unsigned int last_balance; ++ unsigned int balance_interval; ++ unsigned int nr_balance_failed; ++ u64 max_newidle_lb_cost; ++ long unsigned int next_decay_max_lb_cost; ++ u64 avg_scan_cost; ++ unsigned int lb_count[3]; ++ unsigned int lb_failed[3]; ++ unsigned int lb_balanced[3]; ++ unsigned int lb_imbalance[3]; ++ unsigned int lb_gained[3]; ++ unsigned int lb_hot_gained[3]; ++ unsigned int lb_nobusyg[3]; ++ unsigned int lb_nobusyq[3]; ++ unsigned int alb_count; ++ unsigned int alb_failed; ++ unsigned int alb_pushed; ++ unsigned int sbe_count; ++ unsigned int sbe_balanced; ++ unsigned int sbe_pushed; ++ unsigned int sbf_count; ++ unsigned int sbf_balanced; ++ unsigned int sbf_pushed; ++ unsigned int ttwu_wake_remote; ++ unsigned int ttwu_move_affine; ++ unsigned int ttwu_move_balance; ++ char *name; ++ union { ++ void *private; ++ struct callback_head rcu; ++ }; ++ struct sched_domain_shared *shared; ++ unsigned int span_weight; ++ long unsigned int span[0]; ++}; ++ ++struct sched_group_capacity; ++ ++struct sched_group { ++ struct sched_group *next; ++ atomic_t ref; ++ unsigned int group_weight; ++ struct sched_group_capacity *sgc; ++ int asym_prefer_cpu; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int cpumask[0]; ++}; ++ ++struct sched_group_capacity { ++ atomic_t ref; ++ long unsigned int capacity; ++ long unsigned int min_capacity; ++ long unsigned int next_update; ++ int imbalance; ++ int id; ++ long unsigned int cpumask[0]; ++}; ++ ++struct wake_q_head { ++ struct wake_q_node *first; ++ struct wake_q_node **lastp; ++}; ++ ++struct sched_attr { ++ __u32 size; ++ __u32 sched_policy; ++ __u64 sched_flags; ++ __s32 sched_nice; ++ __u32 sched_priority; ++ __u64 sched_runtime; ++ __u64 sched_deadline; ++ __u64 sched_period; ++}; ++ ++enum { ++ CFTYPE_ONLY_ON_ROOT = 1, ++ CFTYPE_NOT_ON_ROOT = 2, ++ CFTYPE_NS_DELEGATABLE = 4, ++ CFTYPE_NO_PREFIX = 8, ++ CFTYPE_WORLD_WRITABLE = 16, ++ __CFTYPE_ONLY_ON_DFL = 65536, ++ __CFTYPE_NOT_ON_DFL = 131072, ++}; ++ ++struct rcu_synchronize { ++ struct callback_head head; ++ struct completion completion; ++}; ++ ++typedef int (*cpu_stop_fn_t)(void *); ++ ++struct cpu_stop_done; ++ ++struct cpu_stop_work { ++ struct list_head list; ++ cpu_stop_fn_t fn; ++ void *arg; ++ struct cpu_stop_done *done; ++}; ++ ++struct cpupri_vec { ++ atomic_t count; ++ cpumask_var_t mask; ++}; ++ ++struct cpupri { ++ struct cpupri_vec pri_to_cpu[102]; ++ int *cpu_to_pri; ++}; ++ ++struct cpudl_item { ++ u64 dl; ++ int cpu; ++ int idx; ++}; ++ ++struct cpudl { ++ raw_spinlock_t lock; ++ int size; ++ cpumask_var_t free_cpus; ++ struct cpudl_item *elements; ++}; ++ ++struct dl_bandwidth { ++ raw_spinlock_t dl_runtime_lock; ++ u64 dl_runtime; ++ u64 dl_period; ++}; ++ ++struct dl_bw { ++ raw_spinlock_t lock; ++ u64 bw; ++ u64 total_bw; ++}; ++ ++typedef int (*tg_visitor)(struct task_group *, void *); ++ ++struct dl_rq { ++ struct rb_root_cached root; ++ long unsigned int dl_nr_running; ++ struct { ++ u64 curr; ++ u64 next; ++ } earliest_dl; ++ long unsigned int dl_nr_migratory; ++ int overloaded; ++ struct rb_root_cached pushable_dl_tasks_root; ++ u64 running_bw; ++ u64 this_bw; ++ u64 extra_bw; ++ u64 bw_ratio; ++}; ++ ++struct root_domain; ++ ++struct rq { ++ raw_spinlock_t lock; ++ unsigned int 
nr_running; ++ unsigned int nr_numa_running; ++ unsigned int nr_preferred_running; ++ unsigned int numa_migrate_on; ++ long unsigned int cpu_load[5]; ++ long unsigned int last_load_update_tick; ++ long unsigned int last_blocked_load_update_tick; ++ unsigned int has_blocked_load; ++ unsigned int nohz_tick_stopped; ++ atomic_t nohz_flags; ++ struct load_weight load; ++ long unsigned int nr_load_updates; ++ u64 nr_switches; ++ struct cfs_rq cfs; ++ struct rt_rq rt; ++ struct dl_rq dl; ++ struct list_head leaf_cfs_rq_list; ++ struct list_head *tmp_alone_branch; ++ long unsigned int nr_uninterruptible; ++ struct task_struct *curr; ++ struct task_struct *idle; ++ struct task_struct *stop; ++ long unsigned int next_balance; ++ struct mm_struct *prev_mm; ++ unsigned int clock_update_flags; ++ u64 clock; ++ u64 clock_task; ++ atomic_t nr_iowait; ++ int membarrier_state; ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ long unsigned int cpu_capacity; ++ long unsigned int cpu_capacity_orig; ++ struct callback_head *balance_callback; ++ unsigned char idle_balance; ++ int active_balance; ++ int push_cpu; ++ struct cpu_stop_work active_balance_work; ++ int cpu; ++ int online; ++ struct list_head cfs_tasks; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sched_avg avg_rt; ++ struct sched_avg avg_dl; ++ struct sched_avg avg_irq; ++ u64 idle_stamp; ++ u64 avg_idle; ++ u64 max_idle_balance_cost; ++ u64 prev_irq_time; ++ u64 prev_steal_time; ++ u64 prev_steal_time_rq; ++ long unsigned int calc_load_update; ++ long int calc_load_active; ++ int hrtick_csd_pending; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ call_single_data_t hrtick_csd; ++ struct hrtimer hrtick_timer; ++ struct sched_info rq_sched_info; ++ long long unsigned int rq_cpu_time; ++ unsigned int yld_count; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++ struct llist_head wake_list; ++ struct cpuidle_state *idle_state; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct root_domain { ++ atomic_t refcount; ++ atomic_t rto_count; ++ struct callback_head rcu; ++ cpumask_var_t span; ++ cpumask_var_t online; ++ bool overload; ++ cpumask_var_t dlo_mask; ++ atomic_t dlo_count; ++ struct dl_bw dl_bw; ++ struct cpudl cpudl; ++ struct irq_work rto_push_work; ++ raw_spinlock_t rto_lock; ++ int rto_loop; ++ int rto_cpu; ++ atomic_t rto_loop_next; ++ atomic_t rto_loop_start; ++ cpumask_var_t rto_mask; ++ struct cpupri cpupri; ++ long unsigned int max_cpu_capacity; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct cputime { ++ u64 utime; ++ u64 stime; ++}; ++ ++struct rq_cputime { ++ raw_spinlock_t lock; ++ long long unsigned int sum_idle_time; ++ long long unsigned int last_entry_idle; ++ struct cputime cpu_prev_time; ++ struct cputime cpu_last_time; ++}; ++ ++struct rq_flags { ++ long unsigned int flags; ++ struct pin_cookie cookie; ++ unsigned int clock_update_flags; ++}; ++ ++enum numa_topology_type { ++ NUMA_DIRECT = 0, ++ NUMA_GLUELESS_MESH = 1, ++ NUMA_BACKPLANE = 2, ++}; ++ ++enum { ++ __SCHED_FEAT_GENTLE_FAIR_SLEEPERS = 0, ++ __SCHED_FEAT_START_DEBIT = 1, ++ __SCHED_FEAT_NEXT_BUDDY = 2, ++ __SCHED_FEAT_LAST_BUDDY = 3, ++ __SCHED_FEAT_CACHE_HOT_BUDDY = 4, ++ __SCHED_FEAT_WAKEUP_PREEMPTION = 5, ++ 
__SCHED_FEAT_HRTICK = 6, ++ __SCHED_FEAT_DOUBLE_TICK = 7, ++ __SCHED_FEAT_LB_BIAS = 8, ++ __SCHED_FEAT_NONTASK_CAPACITY = 9, ++ __SCHED_FEAT_TTWU_QUEUE = 10, ++ __SCHED_FEAT_SIS_AVG_CPU = 11, ++ __SCHED_FEAT_SIS_PROP = 12, ++ __SCHED_FEAT_WARN_DOUBLE_CLOCK = 13, ++ __SCHED_FEAT_RT_PUSH_IPI = 14, ++ __SCHED_FEAT_RT_RUNTIME_SHARE = 15, ++ __SCHED_FEAT_LB_MIN = 16, ++ __SCHED_FEAT_ATTACH_AGE_LOAD = 17, ++ __SCHED_FEAT_WA_IDLE = 18, ++ __SCHED_FEAT_WA_WEIGHT = 19, ++ __SCHED_FEAT_WA_BIAS = 20, ++ __SCHED_FEAT_UTIL_EST = 21, ++ __SCHED_FEAT_NR = 22, ++}; ++ ++struct irqtime { ++ u64 total; ++ u64 tick_delta; ++ u64 irq_start_time; ++ struct u64_stats_sync sync; ++}; ++ ++struct trace_event_raw_sched_kthread_stop { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_kthread_stop_ret { ++ struct trace_entry ent; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_wakeup_template { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ int prio; ++ int success; ++ int target_cpu; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_switch { ++ struct trace_entry ent; ++ char prev_comm[16]; ++ pid_t prev_pid; ++ int prev_prio; ++ long int prev_state; ++ char next_comm[16]; ++ pid_t next_pid; ++ int next_prio; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_migrate_task { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ int prio; ++ int orig_cpu; ++ int dest_cpu; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_process_template { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ int prio; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_process_wait { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ int prio; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_process_fork { ++ struct trace_entry ent; ++ char parent_comm[16]; ++ pid_t parent_pid; ++ char child_comm[16]; ++ pid_t child_pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_process_exec { ++ struct trace_entry ent; ++ u32 __data_loc_filename; ++ pid_t pid; ++ pid_t old_pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_stat_template { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ u64 delay; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_stat_runtime { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ u64 runtime; ++ u64 vruntime; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_pi_setprio { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ int oldprio; ++ int newprio; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_process_hang { ++ struct trace_entry ent; ++ char comm[16]; ++ pid_t pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_move_task_template { ++ struct trace_entry ent; ++ pid_t pid; ++ pid_t tgid; ++ pid_t ngid; ++ int src_cpu; ++ int src_nid; ++ int dst_cpu; ++ int dst_nid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_swap_numa { ++ struct trace_entry ent; ++ pid_t src_pid; ++ pid_t src_tgid; ++ pid_t src_ngid; ++ int src_cpu; ++ int src_nid; ++ pid_t dst_pid; ++ pid_t dst_tgid; ++ pid_t dst_ngid; ++ int dst_cpu; ++ int dst_nid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sched_wake_idle_without_ipi { ++ struct trace_entry ent; ++ int cpu; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_sched_kthread_stop {}; ++ ++struct trace_event_data_offsets_sched_kthread_stop_ret {}; ++ ++struct 
trace_event_data_offsets_sched_wakeup_template {}; ++ ++struct trace_event_data_offsets_sched_switch {}; ++ ++struct trace_event_data_offsets_sched_migrate_task {}; ++ ++struct trace_event_data_offsets_sched_process_template {}; ++ ++struct trace_event_data_offsets_sched_process_wait {}; ++ ++struct trace_event_data_offsets_sched_process_fork {}; ++ ++struct trace_event_data_offsets_sched_process_exec { ++ u32 filename; ++}; ++ ++struct trace_event_data_offsets_sched_stat_template {}; ++ ++struct trace_event_data_offsets_sched_stat_runtime {}; ++ ++struct trace_event_data_offsets_sched_pi_setprio {}; ++ ++struct trace_event_data_offsets_sched_process_hang {}; ++ ++struct trace_event_data_offsets_sched_move_task_template {}; ++ ++struct trace_event_data_offsets_sched_swap_numa {}; ++ ++struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; ++ ++struct migration_arg { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++struct migration_swap_arg { ++ struct task_struct *src_task; ++ struct task_struct *dst_task; ++ int src_cpu; ++ int dst_cpu; ++}; ++ ++struct tick_work { ++ int cpu; ++ atomic_t state; ++ struct delayed_work work; ++}; ++ ++struct cfs_schedulable_data { ++ struct task_group *tg; ++ u64 period; ++ u64 quota; ++}; ++ ++enum { ++ cpuset = 0, ++ possible = 1, ++ fail = 2, ++}; ++ ++struct idle_timer { ++ struct hrtimer timer; ++ int done; ++}; ++ ++struct numa_group { ++ atomic_t refcount; ++ spinlock_t lock; ++ int nr_tasks; ++ pid_t gid; ++ int active_nodes; ++ struct callback_head rcu; ++ long unsigned int total_faults; ++ long unsigned int max_faults_cpu; ++ long unsigned int *faults_cpu; ++ long unsigned int faults[0]; ++}; ++ ++enum tick_dep_bits { ++ TICK_DEP_BIT_POSIX_TIMER = 0, ++ TICK_DEP_BIT_PERF_EVENTS = 1, ++ TICK_DEP_BIT_SCHED = 2, ++ TICK_DEP_BIT_CLOCK_UNSTABLE = 3, ++}; ++ ++enum numa_faults_stats { ++ NUMA_MEM = 0, ++ NUMA_CPU = 1, ++ NUMA_MEMBUF = 2, ++ NUMA_CPUBUF = 3, ++}; ++ ++struct numa_stats { ++ long unsigned int load; ++ long unsigned int compute_capacity; ++ unsigned int nr_running; ++}; ++ ++struct task_numa_env { ++ struct task_struct *p; ++ int src_cpu; ++ int src_nid; ++ int dst_cpu; ++ int dst_nid; ++ struct numa_stats src_stats; ++ struct numa_stats dst_stats; ++ int imbalance_pct; ++ int dist; ++ struct task_struct *best_task; ++ long int best_imp; ++ int best_cpu; ++}; ++ ++enum fbq_type { ++ regular = 0, ++ remote = 1, ++ all = 2, ++}; ++ ++struct lb_env { ++ struct sched_domain *sd; ++ struct rq *src_rq; ++ int src_cpu; ++ int dst_cpu; ++ struct rq *dst_rq; ++ struct cpumask *dst_grpmask; ++ int new_dst_cpu; ++ enum cpu_idle_type idle; ++ long int imbalance; ++ struct cpumask *cpus; ++ unsigned int flags; ++ unsigned int loop; ++ unsigned int loop_break; ++ unsigned int loop_max; ++ enum fbq_type fbq_type; ++ struct list_head tasks; ++}; ++ ++enum group_type { ++ group_other = 0, ++ group_imbalanced = 1, ++ group_overloaded = 2, ++}; ++ ++struct sg_lb_stats { ++ long unsigned int avg_load; ++ long unsigned int group_load; ++ long unsigned int sum_weighted_load; ++ long unsigned int load_per_task; ++ long unsigned int group_capacity; ++ long unsigned int group_util; ++ unsigned int sum_nr_running; ++ unsigned int idle_cpus; ++ unsigned int group_weight; ++ enum group_type group_type; ++ int group_no_capacity; ++ unsigned int nr_numa_running; ++ unsigned int nr_preferred_running; ++}; ++ ++struct sd_lb_stats { ++ struct sched_group *busiest; ++ struct sched_group *local; ++ long unsigned int total_running; ++ long unsigned int 
total_load; ++ long unsigned int total_capacity; ++ long unsigned int avg_load; ++ struct sg_lb_stats busiest_stat; ++ struct sg_lb_stats local_stat; ++}; ++ ++typedef struct task_group *rt_rq_iter_t; ++ ++struct rt_schedulable_data { ++ struct task_group *tg; ++ u64 rt_period; ++ u64 rt_runtime; ++}; ++ ++struct wait_bit_key { ++ void *flags; ++ int bit_nr; ++ long unsigned int timeout; ++}; ++ ++struct wait_bit_queue_entry { ++ struct wait_bit_key key; ++ struct wait_queue_entry wq_entry; ++}; ++ ++typedef int wait_bit_action_f(struct wait_bit_key *, int); ++ ++struct sched_domain_attr { ++ int relax_domain_level; ++}; ++ ++typedef const struct cpumask * (*sched_domain_mask_f)(int); ++ ++typedef int (*sched_domain_flags_f)(); ++ ++struct sd_data { ++ struct sched_domain **sd; ++ struct sched_domain_shared **sds; ++ struct sched_group **sg; ++ struct sched_group_capacity **sgc; ++}; ++ ++struct sched_domain_topology_level { ++ sched_domain_mask_f mask; ++ sched_domain_flags_f sd_flags; ++ int flags; ++ int numa_level; ++ struct sd_data data; ++ char *name; ++}; ++ ++struct s_data { ++ struct sched_domain **sd; ++ struct root_domain *rd; ++}; ++ ++enum s_alloc { ++ sa_rootdomain = 0, ++ sa_sd = 1, ++ sa_sd_storage = 2, ++ sa_none = 3, ++}; ++ ++enum cpuacct_stat_index { ++ CPUACCT_STAT_USER = 0, ++ CPUACCT_STAT_SYSTEM = 1, ++ CPUACCT_STAT_NSTATS = 2, ++}; ++ ++struct cpuacct_usage { ++ u64 usages[2]; ++}; ++ ++struct cpuacct { ++ struct cgroup_subsys_state css; ++ struct cpuacct_usage *cpuusage; ++ struct kernel_cpustat *cpustat; ++}; ++ ++enum { ++ MEMBARRIER_FLAG_SYNC_CORE = 1, ++}; ++ ++enum membarrier_cmd { ++ MEMBARRIER_CMD_QUERY = 0, ++ MEMBARRIER_CMD_GLOBAL = 1, ++ MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, ++ MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, ++ MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, ++ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, ++ MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, ++ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, ++ MEMBARRIER_CMD_SHARED = 1, ++}; ++ ++struct ww_acquire_ctx; ++ ++struct mutex_waiter { ++ struct list_head list; ++ struct task_struct *task; ++ struct ww_acquire_ctx *ww_ctx; ++}; ++ ++struct ww_acquire_ctx { ++ struct task_struct *task; ++ long unsigned int stamp; ++ unsigned int acquired; ++ short unsigned int wounded; ++ short unsigned int is_wait_die; ++}; ++ ++struct ww_mutex { ++ struct mutex base; ++ struct ww_acquire_ctx *ctx; ++}; ++ ++struct semaphore { ++ raw_spinlock_t lock; ++ unsigned int count; ++ struct list_head wait_list; ++}; ++ ++struct semaphore_waiter { ++ struct list_head list; ++ struct task_struct *task; ++ bool up; ++}; ++ ++struct optimistic_spin_node { ++ struct optimistic_spin_node *next; ++ struct optimistic_spin_node *prev; ++ int locked; ++ int cpu; ++}; ++ ++enum qlock_stats { ++ qstat_pv_hash_hops = 0, ++ qstat_pv_kick_unlock = 1, ++ qstat_pv_kick_wake = 2, ++ qstat_pv_latency_kick = 3, ++ qstat_pv_latency_wake = 4, ++ qstat_pv_lock_stealing = 5, ++ qstat_pv_spurious_wakeup = 6, ++ qstat_pv_wait_again = 7, ++ qstat_pv_wait_early = 8, ++ qstat_pv_wait_head = 9, ++ qstat_pv_wait_node = 10, ++ qstat_lock_pending = 11, ++ qstat_lock_slowpath = 12, ++ qstat_lock_use_node2 = 13, ++ qstat_lock_use_node3 = 14, ++ qstat_lock_use_node4 = 15, ++ qstat_lock_no_node = 16, ++ qstat_num = 17, ++ qstat_reset_cnts = 17, ++}; ++ ++struct mcs_spinlock { ++ struct mcs_spinlock *next; ++ unsigned int locked; ++ int count; ++}; ++ ++struct qnode { ++ struct mcs_spinlock mcs; ++}; ++ ++struct hrtimer_sleeper { ++ struct 
hrtimer timer; ++ struct task_struct *task; ++}; ++ ++struct rt_mutex; ++ ++struct rt_mutex_waiter { ++ struct rb_node tree_entry; ++ struct rb_node pi_tree_entry; ++ struct task_struct *task; ++ struct rt_mutex *lock; ++ int prio; ++ u64 deadline; ++}; ++ ++struct rt_mutex { ++ raw_spinlock_t wait_lock; ++ struct rb_root_cached waiters; ++ struct task_struct *owner; ++}; ++ ++enum rtmutex_chainwalk { ++ RT_MUTEX_MIN_CHAINWALK = 0, ++ RT_MUTEX_FULL_CHAINWALK = 1, ++}; ++ ++enum rwsem_waiter_type { ++ RWSEM_WAITING_FOR_WRITE = 0, ++ RWSEM_WAITING_FOR_READ = 1, ++}; ++ ++struct rwsem_waiter { ++ struct list_head list; ++ struct task_struct *task; ++ enum rwsem_waiter_type type; ++}; ++ ++enum rwsem_wake_type { ++ RWSEM_WAKE_ANY = 0, ++ RWSEM_WAKE_READERS = 1, ++ RWSEM_WAKE_READ_OWNED = 2, ++}; ++ ++enum { ++ PM_QOS_RESERVED = 0, ++ PM_QOS_CPU_DMA_LATENCY = 1, ++ PM_QOS_NETWORK_LATENCY = 2, ++ PM_QOS_NETWORK_THROUGHPUT = 3, ++ PM_QOS_MEMORY_BANDWIDTH = 4, ++ PM_QOS_NUM_CLASSES = 5, ++}; ++ ++struct pm_qos_request { ++ struct plist_node node; ++ int pm_qos_class; ++ struct delayed_work work; ++}; ++ ++enum pm_qos_req_action { ++ PM_QOS_ADD_REQ = 0, ++ PM_QOS_UPDATE_REQ = 1, ++ PM_QOS_REMOVE_REQ = 2, ++}; ++ ++struct pm_qos_object { ++ struct pm_qos_constraints *constraints; ++ struct miscdevice pm_qos_power_miscdev; ++ char *name; ++}; ++ ++struct va_format { ++ const char *fmt; ++ va_list *va; ++}; ++ ++enum { ++ TEST_NONE = 0, ++ TEST_CORE = 1, ++ TEST_CPUS = 2, ++ TEST_PLATFORM = 3, ++ TEST_DEVICES = 4, ++ TEST_FREEZER = 5, ++ __TEST_AFTER_LAST = 6, ++}; ++ ++struct pm_vt_switch { ++ struct list_head head; ++ struct device *dev; ++ bool required; ++}; ++ ++struct platform_suspend_ops { ++ int (*valid)(suspend_state_t); ++ int (*begin)(suspend_state_t); ++ int (*prepare)(); ++ int (*prepare_late)(); ++ int (*enter)(suspend_state_t); ++ void (*wake)(); ++ void (*finish)(); ++ bool (*suspend_again)(); ++ void (*end)(); ++ void (*recover)(); ++}; ++ ++struct platform_s2idle_ops { ++ int (*begin)(); ++ int (*prepare)(); ++ void (*wake)(); ++ void (*sync)(); ++ void (*restore)(); ++ void (*end)(); ++}; ++ ++struct platform_hibernation_ops { ++ int (*begin)(); ++ void (*end)(); ++ int (*pre_snapshot)(); ++ void (*finish)(); ++ int (*prepare)(); ++ int (*enter)(); ++ void (*leave)(); ++ int (*pre_restore)(); ++ void (*restore_cleanup)(); ++ void (*recover)(); ++}; ++ ++enum { ++ HIBERNATION_INVALID = 0, ++ HIBERNATION_PLATFORM = 1, ++ HIBERNATION_SHUTDOWN = 2, ++ HIBERNATION_REBOOT = 3, ++ HIBERNATION_SUSPEND = 4, ++ HIBERNATION_TEST_RESUME = 5, ++ __HIBERNATION_AFTER_LAST = 6, ++}; ++ ++struct swsusp_info { ++ struct new_utsname uts; ++ u32 version_code; ++ long unsigned int num_physpages; ++ int cpus; ++ long unsigned int image_pages; ++ long unsigned int pages; ++ long unsigned int size; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ /* thousands of further "long: 64;" anonymous-bitfield padding members of struct swsusp_info continue here (the structure is padded out to page alignment) */ ++
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct snapshot_handle { ++ unsigned int cur; ++ void *buffer; ++ int sync_read; ++}; ++ ++struct linked_page { ++ struct linked_page *next; ++ char data[65528]; ++}; ++ ++struct chain_allocator { ++ struct linked_page *chain; ++ unsigned int used_space; ++ gfp_t gfp_mask; ++ int safe_needed; ++}; ++ ++struct rtree_node { ++ struct list_head list; ++ long unsigned int *data; ++}; ++ ++struct mem_zone_bm_rtree { ++ struct list_head list; ++ struct list_head nodes; ++ struct list_head leaves; ++ long unsigned int start_pfn; ++ long unsigned int end_pfn; ++ struct rtree_node *rtree; ++ int levels; ++ unsigned int blocks; ++}; ++ ++struct bm_position { ++ struct mem_zone_bm_rtree *zone; ++ struct rtree_node *node; ++ long unsigned int node_pfn; ++ int node_bit; ++}; ++ ++struct memory_bitmap { ++ struct list_head zones; ++ struct linked_page *p_list; ++ struct bm_position cur; ++}; ++ ++struct mem_extent { ++ struct list_head hook; ++ long unsigned int start; ++ long unsigned int end; ++}; ++ ++struct nosave_region { ++ struct list_head list; ++ long unsigned int start_pfn; ++ long unsigned int end_pfn; ++}; ++ ++enum req_opf { ++ REQ_OP_READ = 0, ++ REQ_OP_WRITE = 1, ++ REQ_OP_FLUSH = 2, ++ REQ_OP_DISCARD = 3, ++ REQ_OP_ZONE_REPORT = 4, ++ REQ_OP_SECURE_ERASE = 5, ++ REQ_OP_ZONE_RESET = 6, ++ REQ_OP_WRITE_SAME = 7, ++ REQ_OP_WRITE_ZEROES = 9, ++ REQ_OP_SCSI_IN = 32, ++ REQ_OP_SCSI_OUT = 33, ++ REQ_OP_DRV_IN = 34, ++ REQ_OP_DRV_OUT = 35, ++ REQ_OP_LAST = 36, ++}; ++ ++enum req_flag_bits { ++ 
__REQ_FAILFAST_DEV = 8, ++ __REQ_FAILFAST_TRANSPORT = 9, ++ __REQ_FAILFAST_DRIVER = 10, ++ __REQ_SYNC = 11, ++ __REQ_META = 12, ++ __REQ_PRIO = 13, ++ __REQ_NOMERGE = 14, ++ __REQ_IDLE = 15, ++ __REQ_INTEGRITY = 16, ++ __REQ_FUA = 17, ++ __REQ_PREFLUSH = 18, ++ __REQ_RAHEAD = 19, ++ __REQ_BACKGROUND = 20, ++ __REQ_NOWAIT = 21, ++ __REQ_NOUNMAP = 22, ++ __REQ_DRV = 23, ++ __REQ_SWAP = 24, ++ __REQ_NR_BITS = 25, ++}; ++ ++typedef struct { ++ long unsigned int val; ++} swp_entry_t; ++ ++struct swap_map_page { ++ sector_t entries[8191]; ++ sector_t next_swap; ++}; ++ ++struct swap_map_page_list { ++ struct swap_map_page *map; ++ struct swap_map_page_list *next; ++}; ++ ++struct swap_map_handle { ++ struct swap_map_page *cur; ++ struct swap_map_page_list *maps; ++ sector_t cur_swap; ++ sector_t first_sector; ++ unsigned int k; ++ long unsigned int reqd_free_pages; ++ u32 crc32; ++}; ++ ++struct swsusp_header { ++ char reserved[65500]; ++ u32 crc32; ++ sector_t image; ++ unsigned int flags; ++ char orig_sig[10]; ++ char sig[10]; ++}; ++ ++struct swsusp_extent { ++ struct rb_node node; ++ long unsigned int start; ++ long unsigned int end; ++}; ++ ++struct hib_bio_batch { ++ atomic_t count; ++ wait_queue_head_t wait; ++ blk_status_t error; ++}; ++ ++struct crc_data { ++ struct task_struct *thr; ++ atomic_t ready; ++ atomic_t stop; ++ unsigned int run_threads; ++ wait_queue_head_t go; ++ wait_queue_head_t done; ++ u32 *crc32; ++ size_t *unc_len[3]; ++ unsigned char *unc[3]; ++}; ++ ++struct cmp_data { ++ struct task_struct *thr; ++ atomic_t ready; ++ atomic_t stop; ++ int ret; ++ wait_queue_head_t go; ++ wait_queue_head_t done; ++ size_t unc_len; ++ size_t cmp_len; ++ unsigned char unc[2097152]; ++ unsigned char cmp[2293760]; ++ unsigned char wrk[16384]; ++}; ++ ++struct dec_data { ++ struct task_struct *thr; ++ atomic_t ready; ++ atomic_t stop; ++ int ret; ++ wait_queue_head_t go; ++ wait_queue_head_t done; ++ size_t unc_len; ++ size_t cmp_len; ++ unsigned char unc[2097152]; ++ unsigned char cmp[2293760]; ++}; ++ ++typedef s64 compat_loff_t; ++ ++struct resume_swap_area { ++ __kernel_loff_t offset; ++ __u32 dev; ++} __attribute__((packed)); ++ ++struct snapshot_data { ++ struct snapshot_handle handle; ++ int swap; ++ int mode; ++ bool frozen; ++ bool ready; ++ bool platform_support; ++ bool free_bitmaps; ++}; ++ ++struct compat_resume_swap_area { ++ compat_loff_t offset; ++ u32 dev; ++} __attribute__((packed)); ++ ++struct sysrq_key_op { ++ void (*handler)(int); ++ char *help_msg; ++ char *action_msg; ++ int enable_mask; ++}; ++ ++enum kdb_msgsrc { ++ KDB_MSGSRC_INTERNAL = 0, ++ KDB_MSGSRC_PRINTK = 1, ++}; ++ ++struct kmsg_dumper { ++ struct list_head list; ++ void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason); ++ enum kmsg_dump_reason max_reason; ++ bool active; ++ bool registered; ++ u32 cur_idx; ++ u32 next_idx; ++ u64 cur_seq; ++ u64 next_seq; ++}; ++ ++struct trace_event_raw_console { ++ struct trace_entry ent; ++ u32 __data_loc_msg; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_console { ++ u32 msg; ++}; ++ ++struct console_cmdline { ++ char name[16]; ++ int index; ++ char *options; ++}; ++ ++enum devkmsg_log_bits { ++ __DEVKMSG_LOG_BIT_ON = 0, ++ __DEVKMSG_LOG_BIT_OFF = 1, ++ __DEVKMSG_LOG_BIT_LOCK = 2, ++}; ++ ++enum devkmsg_log_masks { ++ DEVKMSG_LOG_MASK_ON = 1, ++ DEVKMSG_LOG_MASK_OFF = 2, ++ DEVKMSG_LOG_MASK_LOCK = 4, ++}; ++ ++enum con_msg_format_flags { ++ MSG_FORMAT_DEFAULT = 0, ++ MSG_FORMAT_SYSLOG = 1, ++}; ++ ++enum log_flags { ++ LOG_NEWLINE = 2, ++ 
LOG_PREFIX = 4, ++ LOG_CONT = 8, ++}; ++ ++struct printk_log { ++ u64 ts_nsec; ++ u16 len; ++ u16 text_len; ++ u16 dict_len; ++ u8 facility; ++ u8 flags: 5; ++ u8 level: 3; ++}; ++ ++struct devkmsg_user { ++ u64 seq; ++ u32 idx; ++ struct ratelimit_state rs; ++ struct mutex lock; ++ char buf[8192]; ++}; ++ ++struct cont { ++ char buf[992]; ++ size_t len; ++ struct task_struct *owner; ++ u64 ts_nsec; ++ u8 level; ++ u8 facility; ++ enum log_flags flags; ++}; ++ ++struct printk_safe_seq_buf { ++ atomic_t len; ++ atomic_t message_lost; ++ struct irq_work work; ++ unsigned char buffer[8160]; ++}; ++ ++enum { ++ _IRQ_DEFAULT_INIT_FLAGS = 0, ++ _IRQ_PER_CPU = 512, ++ _IRQ_LEVEL = 256, ++ _IRQ_NOPROBE = 1024, ++ _IRQ_NOREQUEST = 2048, ++ _IRQ_NOTHREAD = 65536, ++ _IRQ_NOAUTOEN = 4096, ++ _IRQ_MOVE_PCNTXT = 16384, ++ _IRQ_NO_BALANCING = 8192, ++ _IRQ_NESTED_THREAD = 32768, ++ _IRQ_PER_CPU_DEVID = 131072, ++ _IRQ_IS_POLLED = 262144, ++ _IRQ_DISABLE_UNLAZY = 524288, ++ _IRQF_MODIFY_MASK = 1048335, ++}; ++ ++enum { ++ IRQTF_RUNTHREAD = 0, ++ IRQTF_WARNED = 1, ++ IRQTF_AFFINITY = 2, ++ IRQTF_FORCED_THREAD = 3, ++}; ++ ++enum { ++ IRQS_AUTODETECT = 1, ++ IRQS_SPURIOUS_DISABLED = 2, ++ IRQS_POLL_INPROGRESS = 8, ++ IRQS_ONESHOT = 32, ++ IRQS_REPLAY = 64, ++ IRQS_WAITING = 128, ++ IRQS_PENDING = 512, ++ IRQS_SUSPENDED = 2048, ++ IRQS_TIMINGS = 4096, ++ IRQS_NMI = 8192, ++}; ++ ++enum { ++ IRQ_SET_MASK_OK = 0, ++ IRQ_SET_MASK_OK_NOCOPY = 1, ++ IRQ_SET_MASK_OK_DONE = 2, ++}; ++ ++enum { ++ IRQCHIP_SET_TYPE_MASKED = 1, ++ IRQCHIP_EOI_IF_HANDLED = 2, ++ IRQCHIP_MASK_ON_SUSPEND = 4, ++ IRQCHIP_ONOFFLINE_ENABLED = 8, ++ IRQCHIP_SKIP_SET_WAKE = 16, ++ IRQCHIP_ONESHOT_SAFE = 32, ++ IRQCHIP_EOI_THREADED = 64, ++ IRQCHIP_SUPPORTS_LEVEL_MSI = 128, ++ IRQCHIP_SUPPORTS_NMI = 256, ++}; ++ ++enum { ++ IRQC_IS_HARDIRQ = 0, ++ IRQC_IS_NESTED = 1, ++}; ++ ++enum { ++ IRQ_STARTUP_NORMAL = 0, ++ IRQ_STARTUP_MANAGED = 1, ++ IRQ_STARTUP_ABORT = 2, ++}; ++ ++struct irq_devres { ++ unsigned int irq; ++ void *dev_id; ++}; ++ ++struct irq_desc_devres { ++ unsigned int from; ++ unsigned int cnt; ++}; ++ ++struct irq_generic_chip_devres { ++ struct irq_chip_generic *gc; ++ u32 msk; ++ unsigned int clr; ++ unsigned int set; ++}; ++ ++struct acpi_buffer { ++ acpi_size length; ++ void *pointer; ++}; ++ ++enum { ++ IRQ_DOMAIN_FLAG_HIERARCHY = 1, ++ IRQ_DOMAIN_NAME_ALLOCATED = 2, ++ IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, ++ IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, ++ IRQ_DOMAIN_FLAG_MSI = 16, ++ IRQ_DOMAIN_FLAG_MSI_REMAP = 32, ++ IRQ_DOMAIN_MSI_NOMASK_QUIRK = 64, ++ IRQ_DOMAIN_FLAG_NONCORE = 65536, ++}; ++ ++enum { ++ IRQCHIP_FWNODE_REAL = 0, ++ IRQCHIP_FWNODE_NAMED = 1, ++ IRQCHIP_FWNODE_NAMED_ID = 2, ++}; ++ ++struct irqchip_fwid { ++ struct fwnode_handle fwnode; ++ unsigned int type; ++ char *name; ++ void *data; ++}; ++ ++enum { ++ AFFINITY = 0, ++ AFFINITY_LIST = 1, ++ EFFECTIVE = 2, ++ EFFECTIVE_LIST = 3, ++}; ++ ++struct msi_alloc_info { ++ struct msi_desc *desc; ++ irq_hw_number_t hwirq; ++ union { ++ long unsigned int ul; ++ void *ptr; ++ } scratchpad[2]; ++}; ++ ++typedef struct msi_alloc_info msi_alloc_info_t; ++ ++struct msi_domain_info; ++ ++struct msi_domain_ops { ++ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); ++ int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); ++ void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); ++ int (*msi_check)(struct irq_domain *, struct msi_domain_info *, struct device *); ++ int 
(*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); ++ void (*msi_finish)(msi_alloc_info_t *, int); ++ void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); ++ int (*handle_error)(struct irq_domain *, struct msi_desc *, int); ++}; ++ ++struct msi_domain_info { ++ u32 flags; ++ struct msi_domain_ops *ops; ++ struct irq_chip *chip; ++ void *chip_data; ++ irq_flow_handler_t handler; ++ void *handler_data; ++ const char *handler_name; ++ void *data; ++}; ++ ++enum { ++ MSI_FLAG_USE_DEF_DOM_OPS = 1, ++ MSI_FLAG_USE_DEF_CHIP_OPS = 2, ++ MSI_FLAG_MULTI_PCI_MSI = 4, ++ MSI_FLAG_PCI_MSIX = 8, ++ MSI_FLAG_ACTIVATE_EARLY = 16, ++ MSI_FLAG_MUST_REACTIVATE = 32, ++ MSI_FLAG_LEVEL_CAPABLE = 64, ++}; ++ ++struct irq_affinity { ++ int pre_vectors; ++ int post_vectors; ++}; ++ ++struct trace_event_raw_rcu_utilization { ++ struct trace_entry ent; ++ const char *s; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_rcu_utilization {}; ++ ++enum { ++ GP_IDLE = 0, ++ GP_PENDING = 1, ++ GP_PASSED = 2, ++}; ++ ++enum { ++ CB_IDLE = 0, ++ CB_PENDING = 1, ++ CB_REPLAY = 2, ++}; ++ ++struct rcu_cblist { ++ struct callback_head *head; ++ struct callback_head **tail; ++ long int len; ++ long int len_lazy; ++}; ++ ++enum rcutorture_type { ++ RCU_FLAVOR = 0, ++ RCU_BH_FLAVOR = 1, ++ RCU_SCHED_FLAVOR = 2, ++ RCU_TASKS_FLAVOR = 3, ++ SRCU_FLAVOR = 4, ++ INVALID_RCU_FLAVOR = 5, ++}; ++ ++enum tick_device_mode { ++ TICKDEV_MODE_PERIODIC = 0, ++ TICKDEV_MODE_ONESHOT = 1, ++}; ++ ++struct tick_device___2 { ++ struct clock_event_device *evtdev; ++ enum tick_device_mode mode; ++}; ++ ++struct rcu_dynticks { ++ long int dynticks_nesting; ++ long int dynticks_nmi_nesting; ++ atomic_t dynticks; ++ bool rcu_need_heavy_qs; ++ long unsigned int rcu_qs_ctr; ++ bool rcu_urgent_qs; ++}; ++ ++struct rcu_state; ++ ++struct rcu_exp_work { ++ smp_call_func_t rew_func; ++ struct rcu_state *rew_rsp; ++ long unsigned int rew_s; ++ struct work_struct rew_work; ++}; ++ ++struct rcu_node { ++ raw_spinlock_t lock; ++ long unsigned int gp_seq; ++ long unsigned int gp_seq_needed; ++ long unsigned int completedqs; ++ long unsigned int qsmask; ++ long unsigned int rcu_gp_init_mask; ++ long unsigned int qsmaskinit; ++ long unsigned int qsmaskinitnext; ++ long unsigned int expmask; ++ long unsigned int expmaskinit; ++ long unsigned int expmaskinitnext; ++ long unsigned int ffmask; ++ long unsigned int grpmask; ++ int grplo; ++ int grphi; ++ u8 grpnum; ++ u8 level; ++ bool wait_blkd_tasks; ++ struct rcu_node *parent; ++ struct list_head blkd_tasks; ++ struct list_head *gp_tasks; ++ struct list_head *exp_tasks; ++ struct list_head *boost_tasks; ++ struct rt_mutex boost_mtx; ++ long unsigned int boost_time; ++ struct task_struct *boost_kthread_task; ++ unsigned int boost_kthread_status; ++ struct swait_queue_head nocb_gp_wq[2]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ raw_spinlock_t fqslock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t exp_lock; ++ long unsigned int exp_seq_rq; ++ wait_queue_head_t exp_wq[4]; ++ struct rcu_exp_work rew; ++ bool exp_need_flush; ++ long: 56; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rcu_data; ++ ++struct rcu_state { ++ struct rcu_node node[65]; ++ struct rcu_node *level[3]; ++ struct rcu_data *rda; ++ call_rcu_func_t call; ++ int ncpus; ++ long: 32; ++ long: 64; ++ long: 64; ++ u8 boost; ++ long unsigned int gp_seq; ++ 
struct task_struct *gp_kthread; ++ struct swait_queue_head gp_wq; ++ short int gp_flags; ++ short int gp_state; ++ struct mutex barrier_mutex; ++ atomic_t barrier_cpu_count; ++ struct completion barrier_completion; ++ long unsigned int barrier_sequence; ++ struct mutex exp_mutex; ++ struct mutex exp_wake_mutex; ++ long unsigned int expedited_sequence; ++ atomic_t expedited_need_qs; ++ struct swait_queue_head expedited_wq; ++ int ncpus_snap; ++ long unsigned int jiffies_force_qs; ++ long unsigned int jiffies_kick_kthreads; ++ long unsigned int n_force_qs; ++ long unsigned int gp_start; ++ long unsigned int gp_activity; ++ long unsigned int gp_req_activity; ++ long unsigned int jiffies_stall; ++ long unsigned int jiffies_resched; ++ long unsigned int n_force_qs_gpstart; ++ long unsigned int gp_max; ++ const char *name; ++ char abbr; ++ struct list_head flavors; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t ofl_lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++union rcu_noqs { ++ struct { ++ u8 norm; ++ u8 exp; ++ } b; ++ u16 s; ++}; ++ ++struct rcu_data { ++ long unsigned int gp_seq; ++ long unsigned int gp_seq_needed; ++ long unsigned int rcu_qs_ctr_snap; ++ union rcu_noqs cpu_no_qs; ++ bool core_needs_qs; ++ bool beenonline; ++ bool gpwrap; ++ struct rcu_node *mynode; ++ long unsigned int grpmask; ++ long unsigned int ticks_this_gp; ++ struct rcu_segcblist cblist; ++ long int qlen_last_fqs_check; ++ long unsigned int n_force_qs_snap; ++ long int blimit; ++ struct rcu_dynticks *dynticks; ++ int dynticks_snap; ++ long unsigned int dynticks_fqs; ++ long unsigned int cond_resched_completed; ++ struct callback_head barrier_head; ++ int exp_dynticks_snap; ++ struct callback_head *nocb_head; ++ struct callback_head **nocb_tail; ++ atomic_long_t nocb_q_count; ++ atomic_long_t nocb_q_count_lazy; ++ struct callback_head *nocb_follower_head; ++ struct callback_head **nocb_follower_tail; ++ struct swait_queue_head nocb_wq; ++ struct task_struct *nocb_kthread; ++ raw_spinlock_t nocb_lock; ++ int nocb_defer_wakeup; ++ struct timer_list nocb_timer; ++ struct callback_head *nocb_gp_head; ++ struct callback_head **nocb_gp_tail; ++ bool nocb_leader_sleep; ++ struct rcu_data *nocb_next_follower; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct rcu_data *nocb_leader; ++ unsigned int softirq_snap; ++ struct irq_work rcu_iw; ++ bool rcu_iw_pending; ++ long unsigned int rcu_iw_gp_seq; ++ long unsigned int rcu_ofl_gp_seq; ++ short int rcu_ofl_gp_flags; ++ long unsigned int rcu_onl_gp_seq; ++ short int rcu_onl_gp_flags; ++ int cpu; ++ struct rcu_state *rsp; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct patch_data { ++ struct klp_patch *patch; ++ atomic_t cpu_count; ++}; ++ ++struct klp_find_arg { ++ const char *objname; ++ const char *name; ++ long unsigned int addr; ++ long unsigned int count; ++ long unsigned int pos; ++}; ++ ++struct dma_devres { ++ size_t size; ++ void *vaddr; ++ dma_addr_t dma_handle; ++ long unsigned int attrs; ++}; ++ ++struct reserved_mem_ops; ++ ++struct reserved_mem { ++ const char *name; ++ long unsigned int fdt_node; ++ long unsigned int phandle; ++ const struct reserved_mem_ops *ops; ++ phys_addr_t base; ++ phys_addr_t size; ++ void *priv; ++}; ++ ++struct reserved_mem_ops { ++ int (*device_init)(struct reserved_mem *, struct device *); ++ void (*device_release)(struct reserved_mem *, struct device *); ++}; ++ ++typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); 
++ ++struct dma_coherent_mem { ++ void *virt_base; ++ dma_addr_t device_base; ++ long unsigned int pfn_base; ++ int size; ++ int flags; ++ long unsigned int *bitmap; ++ spinlock_t spinlock; ++ bool use_dev_dma_pfn_offset; ++}; ++ ++enum dma_sync_target { ++ SYNC_FOR_CPU = 0, ++ SYNC_FOR_DEVICE = 1, ++}; ++ ++struct trace_event_raw_swiotlb_bounced { ++ struct trace_entry ent; ++ u32 __data_loc_dev_name; ++ u64 dma_mask; ++ dma_addr_t dev_addr; ++ size_t size; ++ enum swiotlb_force swiotlb_force; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_swiotlb_bounced { ++ u32 dev_name; ++}; ++ ++enum kcmp_type { ++ KCMP_FILE = 0, ++ KCMP_VM = 1, ++ KCMP_FILES = 2, ++ KCMP_FS = 3, ++ KCMP_SIGHAND = 4, ++ KCMP_IO = 5, ++ KCMP_SYSVSEM = 6, ++ KCMP_EPOLL_TFD = 7, ++ KCMP_TYPES = 8, ++}; ++ ++struct kcmp_epoll_slot { ++ __u32 efd; ++ __u32 tfd; ++ __u32 toff; ++}; ++ ++enum profile_type { ++ PROFILE_TASK_EXIT = 0, ++ PROFILE_MUNMAP = 1, ++}; ++ ++struct profile_hit { ++ u32 pc; ++ u32 hits; ++}; ++ ++typedef __kernel_suseconds_t suseconds_t; ++ ++typedef __kernel_time_t time_t; ++ ++typedef __u64 timeu64_t; ++ ++struct itimerspec { ++ struct timespec it_interval; ++ struct timespec it_value; ++}; ++ ++struct __kernel_old_timeval { ++ __kernel_long_t tv_sec; ++ __kernel_long_t tv_usec; ++}; ++ ++struct itimerspec64 { ++ struct timespec64 it_interval; ++ struct timespec64 it_value; ++}; ++ ++struct timex { ++ unsigned int modes; ++ __kernel_long_t offset; ++ __kernel_long_t freq; ++ __kernel_long_t maxerror; ++ __kernel_long_t esterror; ++ int status; ++ __kernel_long_t constant; ++ __kernel_long_t precision; ++ __kernel_long_t tolerance; ++ struct timeval time; ++ __kernel_long_t tick; ++ __kernel_long_t ppsfreq; ++ __kernel_long_t jitter; ++ int shift; ++ __kernel_long_t stabil; ++ __kernel_long_t jitcnt; ++ __kernel_long_t calcnt; ++ __kernel_long_t errcnt; ++ __kernel_long_t stbcnt; ++ int tai; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct compat_itimerspec { ++ struct compat_timespec it_interval; ++ struct compat_timespec it_value; ++}; ++ ++struct compat_timex { ++ compat_uint_t modes; ++ compat_long_t offset; ++ compat_long_t freq; ++ compat_long_t maxerror; ++ compat_long_t esterror; ++ compat_int_t status; ++ compat_long_t constant; ++ compat_long_t precision; ++ compat_long_t tolerance; ++ struct compat_timeval time; ++ compat_long_t tick; ++ compat_long_t ppsfreq; ++ compat_long_t jitter; ++ compat_int_t shift; ++ compat_long_t stabil; ++ compat_long_t jitcnt; ++ compat_long_t calcnt; ++ compat_long_t errcnt; ++ compat_long_t stbcnt; ++ compat_int_t tai; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct itimerval { ++ struct timeval it_interval; ++ struct timeval it_value; ++}; ++ ++struct trace_event_raw_timer_class { ++ struct trace_entry ent; ++ void *timer; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_timer_start { ++ struct trace_entry ent; ++ void *timer; ++ void *function; ++ long unsigned int expires; ++ long unsigned int now; ++ unsigned int flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_timer_expire_entry { ++ struct trace_entry ent; ++ void *timer; ++ long unsigned int now; ++ void *function; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_hrtimer_init { ++ struct trace_entry ent; ++ void *hrtimer; ++ clockid_t clockid; ++ enum hrtimer_mode mode; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_hrtimer_start { ++ struct trace_entry ent; ++ void *hrtimer; 
++ void *function; ++ s64 expires; ++ s64 softexpires; ++ enum hrtimer_mode mode; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_hrtimer_expire_entry { ++ struct trace_entry ent; ++ void *hrtimer; ++ s64 now; ++ void *function; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_hrtimer_class { ++ struct trace_entry ent; ++ void *hrtimer; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_itimer_state { ++ struct trace_entry ent; ++ int which; ++ long long unsigned int expires; ++ long int value_sec; ++ long int value_usec; ++ long int interval_sec; ++ long int interval_usec; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_itimer_expire { ++ struct trace_entry ent; ++ int which; ++ pid_t pid; ++ long long unsigned int now; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_tick_stop { ++ struct trace_entry ent; ++ int success; ++ int dependency; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_timer_class {}; ++ ++struct trace_event_data_offsets_timer_start {}; ++ ++struct trace_event_data_offsets_timer_expire_entry {}; ++ ++struct trace_event_data_offsets_hrtimer_init {}; ++ ++struct trace_event_data_offsets_hrtimer_start {}; ++ ++struct trace_event_data_offsets_hrtimer_expire_entry {}; ++ ++struct trace_event_data_offsets_hrtimer_class {}; ++ ++struct trace_event_data_offsets_itimer_state {}; ++ ++struct trace_event_data_offsets_itimer_expire {}; ++ ++struct trace_event_data_offsets_tick_stop {}; ++ ++struct timer_base { ++ raw_spinlock_t lock; ++ struct timer_list *running_timer; ++ long unsigned int clk; ++ long unsigned int next_expiry; ++ unsigned int cpu; ++ bool is_idle; ++ bool must_forward_clk; ++ long unsigned int pending_map[9]; ++ struct hlist_head vectors[576]; ++ long: 64; ++ long: 64; ++}; ++ ++struct process_timer { ++ struct timer_list timer; ++ struct task_struct *task; ++}; ++ ++struct system_time_snapshot { ++ u64 cycles; ++ ktime_t real; ++ ktime_t raw; ++ unsigned int clock_was_set_seq; ++ u8 cs_was_changed_seq; ++}; ++ ++struct system_device_crosststamp { ++ ktime_t device; ++ ktime_t sys_realtime; ++ ktime_t sys_monoraw; ++}; ++ ++struct system_counterval_t { ++ u64 cycles; ++ struct clocksource *cs; ++}; ++ ++enum timekeeping_adv_mode { ++ TK_ADV_TICK = 0, ++ TK_ADV_FREQ = 1, ++}; ++ ++struct tk_fast { ++ seqcount_t seq; ++ struct tk_read_base base[2]; ++}; ++ ++enum tick_nohz_mode { ++ NOHZ_MODE_INACTIVE = 0, ++ NOHZ_MODE_LOWRES = 1, ++ NOHZ_MODE_HIGHRES = 2, ++}; ++ ++struct tick_sched { ++ struct hrtimer sched_timer; ++ long unsigned int check_clocks; ++ enum tick_nohz_mode nohz_mode; ++ unsigned int inidle: 1; ++ unsigned int tick_stopped: 1; ++ unsigned int idle_active: 1; ++ unsigned int do_timer_last: 1; ++ unsigned int got_idle_tick: 1; ++ ktime_t last_tick; ++ ktime_t next_tick; ++ long unsigned int idle_jiffies; ++ long unsigned int idle_calls; ++ long unsigned int idle_sleeps; ++ ktime_t idle_entrytime; ++ ktime_t idle_waketime; ++ ktime_t idle_exittime; ++ ktime_t idle_sleeptime; ++ ktime_t iowait_sleeptime; ++ long unsigned int last_jiffies; ++ u64 timer_expires; ++ u64 timer_expires_base; ++ u64 next_timer; ++ ktime_t idle_expires; ++ atomic_t tick_dep_mask; ++}; ++ ++struct timer_list_iter { ++ int cpu; ++ bool second_pass; ++ u64 now; ++}; ++ ++struct tm { ++ int tm_sec; ++ int tm_min; ++ int tm_hour; ++ int tm_mday; ++ int tm_mon; ++ long int tm_year; ++ int tm_wday; ++ int tm_yday; ++}; ++ ++typedef __kernel_timer_t timer_t; ++ ++struct rtc_time { ++ int tm_sec; ++ int tm_min; ++ int tm_hour; ++ int tm_mday; ++ int tm_mon; 
++ int tm_year; ++ int tm_wday; ++ int tm_yday; ++ int tm_isdst; ++}; ++ ++struct rtc_wkalrm { ++ unsigned char enabled; ++ unsigned char pending; ++ struct rtc_time time; ++}; ++ ++struct class_interface { ++ struct list_head node; ++ struct class *class; ++ int (*add_dev)(struct device *, struct class_interface *); ++ void (*remove_dev)(struct device *, struct class_interface *); ++}; ++ ++struct rtc_class_ops { ++ int (*ioctl)(struct device *, unsigned int, long unsigned int); ++ int (*read_time)(struct device *, struct rtc_time *); ++ int (*set_time)(struct device *, struct rtc_time *); ++ int (*read_alarm)(struct device *, struct rtc_wkalrm *); ++ int (*set_alarm)(struct device *, struct rtc_wkalrm *); ++ int (*proc)(struct device *, struct seq_file *); ++ int (*set_mmss64)(struct device *, time64_t); ++ int (*set_mmss)(struct device *, long unsigned int); ++ int (*read_callback)(struct device *, int); ++ int (*alarm_irq_enable)(struct device *, unsigned int); ++ int (*read_offset)(struct device *, long int *); ++ int (*set_offset)(struct device *, long int); ++}; ++ ++struct rtc_timer { ++ struct timerqueue_node node; ++ ktime_t period; ++ void (*func)(void *); ++ void *private_data; ++ int enabled; ++}; ++ ++struct nvmem_device; ++ ++struct rtc_device { ++ struct device dev; ++ struct module *owner; ++ int id; ++ const struct rtc_class_ops *ops; ++ struct mutex ops_lock; ++ struct cdev char_dev; ++ long unsigned int flags; ++ long unsigned int irq_data; ++ spinlock_t irq_lock; ++ wait_queue_head_t irq_queue; ++ struct fasync_struct *async_queue; ++ int irq_freq; ++ int max_user_freq; ++ struct timerqueue_head timerqueue; ++ struct rtc_timer aie_timer; ++ struct rtc_timer uie_rtctimer; ++ struct hrtimer pie_timer; ++ int pie_enabled; ++ struct work_struct irqwork; ++ int uie_unsupported; ++ long int set_offset_nsec; ++ bool registered; ++ struct nvmem_device *nvmem; ++ bool nvram_old_abi; ++ struct bin_attribute *nvram; ++ time64_t range_min; ++ timeu64_t range_max; ++ time64_t start_secs; ++ time64_t offset_secs; ++ bool set_start_time; ++}; ++ ++enum alarmtimer_type { ++ ALARM_REALTIME = 0, ++ ALARM_BOOTTIME = 1, ++ ALARM_NUMTYPE = 2, ++ ALARM_REALTIME_FREEZER = 3, ++ ALARM_BOOTTIME_FREEZER = 4, ++}; ++ ++enum alarmtimer_restart { ++ ALARMTIMER_NORESTART = 0, ++ ALARMTIMER_RESTART = 1, ++}; ++ ++struct alarm { ++ struct timerqueue_node node; ++ struct hrtimer timer; ++ enum alarmtimer_restart (*function)(struct alarm *, ktime_t); ++ enum alarmtimer_type type; ++ int state; ++ void *data; ++}; ++ ++struct property_entry; ++ ++struct platform_device_info { ++ struct device *parent; ++ struct fwnode_handle *fwnode; ++ const char *name; ++ int id; ++ const struct resource *res; ++ unsigned int num_res; ++ const void *data; ++ size_t size_data; ++ u64 dma_mask; ++ struct property_entry *properties; ++}; ++ ++struct cpu_timer_list { ++ struct list_head entry; ++ u64 expires; ++ u64 incr; ++ struct task_struct *task; ++ int firing; ++}; ++ ++struct k_clock; ++ ++struct k_itimer { ++ struct list_head list; ++ struct hlist_node t_hash; ++ spinlock_t it_lock; ++ const struct k_clock *kclock; ++ clockid_t it_clock; ++ timer_t it_id; ++ int it_active; ++ s64 it_overrun; ++ s64 it_overrun_last; ++ int it_requeue_pending; ++ int it_sigev_notify; ++ ktime_t it_interval; ++ struct signal_struct *it_signal; ++ union { ++ struct pid *it_pid; ++ struct task_struct *it_process; ++ }; ++ struct sigqueue *sigq; ++ union { ++ struct { ++ struct hrtimer timer; ++ } real; ++ struct cpu_timer_list cpu; ++ 
struct { ++ struct alarm alarmtimer; ++ } alarm; ++ struct callback_head rcu; ++ } it; ++}; ++ ++struct k_clock { ++ int (*clock_getres)(const clockid_t, struct timespec64 *); ++ int (*clock_set)(const clockid_t, const struct timespec64 *); ++ int (*clock_get)(const clockid_t, struct timespec64 *); ++ int (*clock_adj)(const clockid_t, struct timex *); ++ int (*timer_create)(struct k_itimer *); ++ int (*nsleep)(const clockid_t, int, const struct timespec64 *); ++ int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); ++ int (*timer_del)(struct k_itimer *); ++ void (*timer_get)(struct k_itimer *, struct itimerspec64 *); ++ void (*timer_rearm)(struct k_itimer *); ++ s64 (*timer_forward)(struct k_itimer *, ktime_t); ++ ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); ++ int (*timer_try_to_cancel)(struct k_itimer *); ++ void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); ++}; ++ ++struct trace_event_raw_alarmtimer_suspend { ++ struct trace_entry ent; ++ s64 expires; ++ unsigned char alarm_type; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_alarm_class { ++ struct trace_entry ent; ++ void *alarm; ++ unsigned char alarm_type; ++ s64 expires; ++ s64 now; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_alarmtimer_suspend {}; ++ ++struct trace_event_data_offsets_alarm_class {}; ++ ++struct alarm_base { ++ spinlock_t lock; ++ struct timerqueue_head timerqueue; ++ ktime_t (*gettime)(); ++ clockid_t base_clockid; ++}; ++ ++struct sigevent { ++ sigval_t sigev_value; ++ int sigev_signo; ++ int sigev_notify; ++ union { ++ int _pad[12]; ++ int _tid; ++ struct { ++ void (*_function)(sigval_t); ++ void *_attribute; ++ } _sigev_thread; ++ } _sigev_un; ++}; ++ ++typedef struct sigevent sigevent_t; ++ ++struct compat_sigevent { ++ compat_sigval_t sigev_value; ++ compat_int_t sigev_signo; ++ compat_int_t sigev_notify; ++ union { ++ compat_int_t _pad[13]; ++ compat_int_t _tid; ++ struct { ++ compat_uptr_t _function; ++ compat_uptr_t _attribute; ++ } _sigev_thread; ++ } _sigev_un; ++}; ++ ++typedef unsigned int uint; ++ ++struct posix_clock; ++ ++struct posix_clock_operations { ++ struct module *owner; ++ int (*clock_adjtime)(struct posix_clock *, struct timex *); ++ int (*clock_gettime)(struct posix_clock *, struct timespec64 *); ++ int (*clock_getres)(struct posix_clock *, struct timespec64 *); ++ int (*clock_settime)(struct posix_clock *, const struct timespec64 *); ++ long int (*ioctl)(struct posix_clock *, unsigned int, long unsigned int); ++ int (*open)(struct posix_clock *, fmode_t); ++ __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *); ++ int (*release)(struct posix_clock *); ++ ssize_t (*read)(struct posix_clock *, uint, char *, size_t); ++}; ++ ++struct posix_clock { ++ struct posix_clock_operations ops; ++ struct cdev cdev; ++ struct device *dev; ++ struct rw_semaphore rwsem; ++ bool zombie; ++}; ++ ++struct posix_clock_desc { ++ struct file *fp; ++ struct posix_clock *clk; ++}; ++ ++struct compat_itimerval { ++ struct compat_timeval it_interval; ++ struct compat_timeval it_value; ++}; ++ ++typedef s64 int64_t; ++ ++struct ce_unbind { ++ struct clock_event_device *ce; ++ int res; ++}; ++ ++enum tick_broadcast_state { ++ TICK_BROADCAST_EXIT = 0, ++ TICK_BROADCAST_ENTER = 1, ++}; ++ ++enum tick_broadcast_mode { ++ TICK_BROADCAST_OFF = 0, ++ TICK_BROADCAST_ON = 1, ++ TICK_BROADCAST_FORCE = 2, ++}; ++ ++struct clock_read_data { ++ u64 epoch_ns; ++ u64 epoch_cyc; ++ u64 sched_clock_mask; ++ u64 (*read_sched_clock)(); ++ u32 
mult; ++ u32 shift; ++}; ++ ++struct clock_data { ++ seqcount_t seq; ++ struct clock_read_data read_data[2]; ++ ktime_t wrap_kt; ++ long unsigned int rate; ++ u64 (*actual_read_sched_clock)(); ++}; ++ ++union futex_key { ++ struct { ++ u64 i_seq; ++ long unsigned int pgoff; ++ unsigned int offset; ++ } shared; ++ struct { ++ union { ++ struct mm_struct *mm; ++ u64 __tmp; ++ }; ++ long unsigned int address; ++ unsigned int offset; ++ } private; ++ struct { ++ u64 ptr; ++ long unsigned int word; ++ unsigned int offset; ++ } both; ++}; ++ ++struct futex_pi_state { ++ struct list_head list; ++ struct rt_mutex pi_mutex; ++ struct task_struct *owner; ++ atomic_t refcount; ++ union futex_key key; ++}; ++ ++struct futex_q { ++ struct plist_node list; ++ struct task_struct *task; ++ spinlock_t *lock_ptr; ++ union futex_key key; ++ struct futex_pi_state *pi_state; ++ struct rt_mutex_waiter *rt_waiter; ++ union futex_key *requeue_pi_key; ++ u32 bitset; ++}; ++ ++struct futex_hash_bucket { ++ atomic_t waiters; ++ spinlock_t lock; ++ struct plist_head chain; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++enum futex_access { ++ FUTEX_READ = 0, ++ FUTEX_WRITE = 1, ++}; ++ ++enum { ++ CSD_FLAG_LOCK = 1, ++ CSD_FLAG_SYNCHRONOUS = 2, ++}; ++ ++struct call_function_data { ++ call_single_data_t *csd; ++ cpumask_var_t cpumask; ++ cpumask_var_t cpumask_ipi; ++}; ++ ++struct smp_call_on_cpu_struct { ++ struct work_struct work; ++ struct completion done; ++ int (*func)(void *); ++ void *data; ++ int ret; ++ int cpu; ++}; ++ ++struct latch_tree_root { ++ seqcount_t seq; ++ struct rb_root tree[2]; ++}; ++ ++struct latch_tree_ops { ++ bool (*less)(struct latch_tree_node *, struct latch_tree_node *); ++ int (*comp)(void *, struct latch_tree_node *); ++}; ++ ++struct module_use { ++ struct list_head source_list; ++ struct list_head target_list; ++ struct module *source; ++ struct module *target; ++}; ++ ++struct module_sect_attr { ++ struct module_attribute mattr; ++ char *name; ++ long unsigned int address; ++}; ++ ++struct module_sect_attrs { ++ struct attribute_group grp; ++ unsigned int nsections; ++ struct module_sect_attr attrs[0]; ++}; ++ ++struct module_notes_attrs { ++ struct kobject *dir; ++ unsigned int notes; ++ struct bin_attribute attrs[0]; ++}; ++ ++struct symsearch { ++ const struct kernel_symbol *start; ++ const struct kernel_symbol *stop; ++ const s32 *crcs; ++ enum { ++ NOT_GPL_ONLY = 0, ++ GPL_ONLY = 1, ++ WILL_BE_GPL_ONLY = 2, ++ } licence; ++ bool unused; ++}; ++ ++enum kernel_read_file_id { ++ READING_UNKNOWN = 0, ++ READING_FIRMWARE = 1, ++ READING_FIRMWARE_PREALLOC_BUFFER = 2, ++ READING_MODULE = 3, ++ READING_KEXEC_IMAGE = 4, ++ READING_KEXEC_INITRAMFS = 5, ++ READING_POLICY = 6, ++ READING_X509_CERTIFICATE = 7, ++ READING_MAX_ID = 8, ++}; ++ ++enum kernel_load_data_id { ++ LOADING_UNKNOWN = 0, ++ LOADING_FIRMWARE = 1, ++ LOADING_FIRMWARE_PREALLOC_BUFFER = 2, ++ LOADING_MODULE = 3, ++ LOADING_KEXEC_IMAGE = 4, ++ LOADING_KEXEC_INITRAMFS = 5, ++ LOADING_POLICY = 6, ++ LOADING_X509_CERTIFICATE = 7, ++ LOADING_MAX_ID = 8, ++}; ++ ++struct load_info { ++ const char *name; ++ struct module *mod; ++ Elf64_Ehdr *hdr; ++ long unsigned int len; ++ Elf64_Shdr *sechdrs; ++ char *secstrings; ++ char *strtab; ++ long unsigned int symoffs; ++ long unsigned int stroffs; ++ struct _ddebug *debug; ++ unsigned int num_debug; ++ bool sig_ok; ++ long unsigned int mod_kallsyms_init_off; ++ struct { ++ unsigned int sym; ++ unsigned int str; ++ unsigned int mod; ++ unsigned int vers; ++ 
unsigned int info; ++ unsigned int pcpu; ++ } index; ++}; ++ ++struct trace_event_raw_module_load { ++ struct trace_entry ent; ++ unsigned int taints; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_module_free { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_module_refcnt { ++ struct trace_entry ent; ++ long unsigned int ip; ++ int refcnt; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_module_request { ++ struct trace_entry ent; ++ long unsigned int ip; ++ bool wait; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_module_load { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_module_free { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_module_refcnt { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_module_request { ++ u32 name; ++}; ++ ++struct mod_tree_root { ++ struct latch_tree_root root; ++ long unsigned int addr_min; ++ long unsigned int addr_max; ++}; ++ ++struct find_symbol_arg { ++ const char *name; ++ bool gplok; ++ bool warn; ++ struct module *owner; ++ const s32 *crc; ++ const struct kernel_symbol *sym; ++}; ++ ++struct mod_initfree { ++ struct callback_head rcu; ++ void *module_init; ++}; ++ ++enum key_being_used_for { ++ VERIFYING_MODULE_SIGNATURE = 0, ++ VERIFYING_FIRMWARE_SIGNATURE = 1, ++ VERIFYING_KEXEC_PE_SIGNATURE = 2, ++ VERIFYING_KEY_SIGNATURE = 3, ++ VERIFYING_KEY_SELF_SIGNATURE = 4, ++ VERIFYING_UNSPECIFIED_SIGNATURE = 5, ++ NR__KEY_BEING_USED_FOR = 6, ++}; ++ ++struct asymmetric_key_subtype; ++ ++enum pkey_id_type { ++ PKEY_ID_PGP = 0, ++ PKEY_ID_X509 = 1, ++ PKEY_ID_PKCS7 = 2, ++}; ++ ++struct module_signature { ++ u8 algo; ++ u8 hash; ++ u8 id_type; ++ u8 signer_len; ++ u8 key_id_len; ++ u8 __pad[3]; ++ __be32 sig_len; ++}; ++ ++struct kallsym_iter { ++ loff_t pos; ++ loff_t pos_arch_end; ++ loff_t pos_mod_end; ++ loff_t pos_ftrace_mod_end; ++ long unsigned int value; ++ unsigned int nameoff; ++ char type; ++ char name[128]; ++ char module_name[56]; ++ int exported; ++ int show_value; ++}; ++ ++typedef struct { ++ int val[2]; ++} __kernel_fsid_t; ++ ++enum { ++ SB_UNFROZEN = 0, ++ SB_FREEZE_WRITE = 1, ++ SB_FREEZE_PAGEFAULT = 2, ++ SB_FREEZE_FS = 3, ++ SB_FREEZE_COMPLETE = 4, ++}; ++ ++struct kstatfs { ++ long int f_type; ++ long int f_bsize; ++ u64 f_blocks; ++ u64 f_bfree; ++ u64 f_bavail; ++ u64 f_files; ++ u64 f_ffree; ++ __kernel_fsid_t f_fsid; ++ long int f_namelen; ++ long int f_frsize; ++ long int f_flags; ++ long int f_spare[4]; ++}; ++ ++typedef __u16 comp_t; ++ ++struct acct_v3 { ++ char ac_flag; ++ char ac_version; ++ __u16 ac_tty; ++ __u32 ac_exitcode; ++ __u32 ac_uid; ++ __u32 ac_gid; ++ __u32 ac_pid; ++ __u32 ac_ppid; ++ __u32 ac_btime; ++ __u32 ac_etime; ++ comp_t ac_utime; ++ comp_t ac_stime; ++ comp_t ac_mem; ++ comp_t ac_io; ++ comp_t ac_rw; ++ comp_t ac_minflt; ++ comp_t ac_majflt; ++ comp_t ac_swaps; ++ char ac_comm[16]; ++}; ++ ++typedef struct acct_v3 acct_t; ++ ++struct fs_pin { ++ wait_queue_head_t wait; ++ int done; ++ struct hlist_node s_list; ++ struct hlist_node m_list; ++ void (*kill)(struct fs_pin *); ++}; ++ ++struct bsd_acct_struct { ++ struct fs_pin pin; ++ atomic_long_t count; ++ struct callback_head rcu; ++ struct mutex lock; ++ int active; ++ long unsigned int needcheck; ++ struct file *file; ++ struct pid_namespace *ns; ++ struct work_struct work; ++ struct completion done; ++}; ++ ++enum compound_dtor_id { ++ NULL_COMPOUND_DTOR = 0, ++ COMPOUND_PAGE_DTOR = 
1, ++ HUGETLB_PAGE_DTOR = 2, ++ TRANSHUGE_PAGE_DTOR = 3, ++ NR_COMPOUND_DTORS = 4, ++}; ++ ++struct elf64_note { ++ Elf64_Word n_namesz; ++ Elf64_Word n_descsz; ++ Elf64_Word n_type; ++}; ++ ++typedef long unsigned int elf_greg_t; ++ ++typedef elf_greg_t elf_gregset_t[34]; ++ ++struct elf_siginfo { ++ int si_signo; ++ int si_code; ++ int si_errno; ++}; ++ ++struct elf_prstatus { ++ struct elf_siginfo pr_info; ++ short int pr_cursig; ++ long unsigned int pr_sigpend; ++ long unsigned int pr_sighold; ++ pid_t pr_pid; ++ pid_t pr_ppid; ++ pid_t pr_pgrp; ++ pid_t pr_sid; ++ struct timeval pr_utime; ++ struct timeval pr_stime; ++ struct timeval pr_cutime; ++ struct timeval pr_cstime; ++ elf_gregset_t pr_reg; ++ int pr_fpvalid; ++}; ++ ++struct compat_kexec_segment { ++ compat_uptr_t buf; ++ compat_size_t bufsz; ++ compat_ulong_t mem; ++ compat_size_t memsz; ++}; ++ ++enum migrate_reason { ++ MR_COMPACTION = 0, ++ MR_MEMORY_FAILURE = 1, ++ MR_MEMORY_HOTPLUG = 2, ++ MR_SYSCALL = 3, ++ MR_MEMPOLICY_MBIND = 4, ++ MR_NUMA_MISPLACED = 5, ++ MR_CONTIG_RANGE = 6, ++ MR_TYPES = 7, ++}; ++ ++enum kernfs_node_type { ++ KERNFS_DIR = 1, ++ KERNFS_FILE = 2, ++ KERNFS_LINK = 4, ++}; ++ ++enum kernfs_root_flag { ++ KERNFS_ROOT_CREATE_DEACTIVATED = 1, ++ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, ++ KERNFS_ROOT_SUPPORT_EXPORTOP = 4, ++}; ++ ++enum { ++ CGRP_NOTIFY_ON_RELEASE = 0, ++ CGRP_CPUSET_CLONE_CHILDREN = 1, ++}; ++ ++enum { ++ CGRP_ROOT_NOPREFIX = 2, ++ CGRP_ROOT_XATTR = 4, ++ CGRP_ROOT_NS_DELEGATE = 8, ++ CGRP_ROOT_CPUSET_V2_MODE = 16, ++}; ++ ++struct cgroup_taskset { ++ struct list_head src_csets; ++ struct list_head dst_csets; ++ int nr_tasks; ++ int ssid; ++ struct list_head *csets; ++ struct css_set *cur_cset; ++ struct task_struct *cur_task; ++}; ++ ++struct css_task_iter { ++ struct cgroup_subsys *ss; ++ unsigned int flags; ++ struct list_head *cset_pos; ++ struct list_head *cset_head; ++ struct list_head *tcset_pos; ++ struct list_head *tcset_head; ++ struct list_head *task_pos; ++ struct list_head *tasks_head; ++ struct list_head *mg_tasks_head; ++ struct list_head *dying_tasks_head; ++ struct list_head *cur_tasks_head; ++ struct css_set *cur_cset; ++ struct css_set *cur_dcset; ++ struct task_struct *cur_task; ++ struct list_head iters_node; ++}; ++ ++struct cgrp_cset_link { ++ struct cgroup *cgrp; ++ struct css_set *cset; ++ struct list_head cset_link; ++ struct list_head cgrp_link; ++}; ++ ++struct cgroup_mgctx { ++ struct list_head preloaded_src_csets; ++ struct list_head preloaded_dst_csets; ++ struct cgroup_taskset tset; ++ u16 ss_mask; ++}; ++ ++struct cgroup_sb_opts { ++ u16 subsys_mask; ++ unsigned int flags; ++ char *release_agent; ++ bool cpuset_clone_children; ++ char *name; ++ bool none; ++}; ++ ++struct trace_event_raw_cgroup_root { ++ struct trace_entry ent; ++ int root; ++ u16 ss_mask; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cgroup { ++ struct trace_entry ent; ++ int root; ++ int id; ++ int level; ++ u32 __data_loc_path; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cgroup_migrate { ++ struct trace_entry ent; ++ int dst_root; ++ int dst_id; ++ int dst_level; ++ int pid; ++ u32 __data_loc_dst_path; ++ u32 __data_loc_comm; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_cgroup_root { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_cgroup { ++ u32 path; ++}; ++ ++struct trace_event_data_offsets_cgroup_migrate { ++ u32 dst_path; ++ u32 comm; ++}; ++ ++struct cgroupstats { ++ __u64 nr_sleeping; ++ __u64 nr_running; ++ __u64 
nr_stopped; ++ __u64 nr_uninterruptible; ++ __u64 nr_io_wait; ++}; ++ ++enum cgroup_filetype { ++ CGROUP_FILE_PROCS = 0, ++ CGROUP_FILE_TASKS = 1, ++}; ++ ++struct cgroup_pidlist { ++ struct { ++ enum cgroup_filetype type; ++ struct pid_namespace *ns; ++ } key; ++ pid_t *list; ++ int length; ++ struct list_head links; ++ struct cgroup *owner; ++ struct delayed_work destroy_dwork; ++}; ++ ++enum freezer_state_flags { ++ CGROUP_FREEZER_ONLINE = 1, ++ CGROUP_FREEZING_SELF = 2, ++ CGROUP_FREEZING_PARENT = 4, ++ CGROUP_FROZEN = 8, ++ CGROUP_FREEZING = 6, ++}; ++ ++struct freezer { ++ struct cgroup_subsys_state css; ++ unsigned int state; ++}; ++ ++struct pids_cgroup { ++ struct cgroup_subsys_state css; ++ atomic64_t counter; ++ atomic64_t limit; ++ struct cgroup_file events_file; ++ atomic64_t events_limit; ++}; ++ ++typedef struct { ++ char *from; ++ char *to; ++} substring_t; ++ ++enum rdmacg_resource_type { ++ RDMACG_RESOURCE_HCA_HANDLE = 0, ++ RDMACG_RESOURCE_HCA_OBJECT = 1, ++ RDMACG_RESOURCE_MAX = 2, ++}; ++ ++struct rdma_cgroup { ++ struct cgroup_subsys_state css; ++ struct list_head rpools; ++}; ++ ++struct rdmacg_device { ++ struct list_head dev_node; ++ struct list_head rpools; ++ char *name; ++}; ++ ++enum rdmacg_file_type { ++ RDMACG_RESOURCE_TYPE_MAX = 0, ++ RDMACG_RESOURCE_TYPE_STAT = 1, ++}; ++ ++struct rdmacg_resource { ++ int max; ++ int usage; ++}; ++ ++struct rdmacg_resource_pool { ++ struct rdmacg_device *device; ++ struct rdmacg_resource resources[2]; ++ struct list_head cg_node; ++ struct list_head dev_node; ++ u64 usage_sum; ++ int num_max_cnt; ++}; ++ ++struct fmeter { ++ int cnt; ++ int val; ++ time64_t time; ++ spinlock_t lock; ++}; ++ ++struct cpuset { ++ struct cgroup_subsys_state css; ++ long unsigned int flags; ++ cpumask_var_t cpus_allowed; ++ nodemask_t mems_allowed; ++ cpumask_var_t effective_cpus; ++ nodemask_t effective_mems; ++ nodemask_t old_mems_allowed; ++ struct fmeter fmeter; ++ int attach_in_progress; ++ int pn; ++ int relax_domain_level; ++}; ++ ++typedef enum { ++ CS_ONLINE = 0, ++ CS_CPU_EXCLUSIVE = 1, ++ CS_MEM_EXCLUSIVE = 2, ++ CS_MEM_HARDWALL = 3, ++ CS_MEMORY_MIGRATE = 4, ++ CS_SCHED_LOAD_BALANCE = 5, ++ CS_SPREAD_PAGE = 6, ++ CS_SPREAD_SLAB = 7, ++} cpuset_flagbits_t; ++ ++struct cpuset_migrate_mm_work { ++ struct work_struct work; ++ struct mm_struct *mm; ++ nodemask_t from; ++ nodemask_t to; ++}; ++ ++typedef enum { ++ FILE_MEMORY_MIGRATE = 0, ++ FILE_CPULIST = 1, ++ FILE_MEMLIST = 2, ++ FILE_EFFECTIVE_CPULIST = 3, ++ FILE_EFFECTIVE_MEMLIST = 4, ++ FILE_CPU_EXCLUSIVE = 5, ++ FILE_MEM_EXCLUSIVE = 6, ++ FILE_MEM_HARDWALL = 7, ++ FILE_SCHED_LOAD_BALANCE = 8, ++ FILE_SCHED_RELAX_DOMAIN_LEVEL = 9, ++ FILE_MEMORY_PRESSURE_ENABLED = 10, ++ FILE_MEMORY_PRESSURE = 11, ++ FILE_SPREAD_PAGE = 12, ++ FILE_SPREAD_SLAB = 13, ++} cpuset_filetype_t; ++ ++struct key_preparsed_payload { ++ char *description; ++ union key_payload payload; ++ const void *data; ++ size_t datalen; ++ size_t quotalen; ++ time64_t expiry; ++}; ++ ++struct key_match_data { ++ bool (*cmp)(const struct key *, const struct key_match_data *); ++ const void *raw_data; ++ void *preparsed; ++ unsigned int lookup_type; ++}; ++ ++struct idmap_key { ++ bool map_up; ++ u32 id; ++ u32 count; ++}; ++ ++struct cpu_stop_done { ++ atomic_t nr_todo; ++ int ret; ++ struct completion completion; ++}; ++ ++struct cpu_stopper { ++ struct task_struct *thread; ++ raw_spinlock_t lock; ++ bool enabled; ++ struct list_head works; ++ struct cpu_stop_work stop_work; ++}; ++ ++enum multi_stop_state { ++ 
MULTI_STOP_NONE = 0, ++ MULTI_STOP_PREPARE = 1, ++ MULTI_STOP_DISABLE_IRQ = 2, ++ MULTI_STOP_RUN = 3, ++ MULTI_STOP_EXIT = 4, ++}; ++ ++struct multi_stop_data { ++ cpu_stop_fn_t fn; ++ void *data; ++ unsigned int num_threads; ++ const struct cpumask *active_cpus; ++ enum multi_stop_state state; ++ atomic_t thread_ack; ++}; ++ ++struct cpu_vfs_cap_data { ++ __u32 magic_etc; ++ kernel_cap_t permitted; ++ kernel_cap_t inheritable; ++}; ++ ++enum audit_nlgrps { ++ AUDIT_NLGRP_NONE = 0, ++ AUDIT_NLGRP_READLOG = 1, ++ __AUDIT_NLGRP_MAX = 2, ++}; ++ ++struct audit_status { ++ __u32 mask; ++ __u32 enabled; ++ __u32 failure; ++ __u32 pid; ++ __u32 rate_limit; ++ __u32 backlog_limit; ++ __u32 lost; ++ __u32 backlog; ++ union { ++ __u32 version; ++ __u32 feature_bitmap; ++ }; ++ __u32 backlog_wait_time; ++}; ++ ++struct audit_features { ++ __u32 vers; ++ __u32 mask; ++ __u32 features; ++ __u32 lock; ++}; ++ ++struct audit_tty_status { ++ __u32 enabled; ++ __u32 log_passwd; ++}; ++ ++struct audit_sig_info { ++ uid_t uid; ++ pid_t pid; ++ char ctx[0]; ++}; ++ ++struct netlink_kernel_cfg { ++ unsigned int groups; ++ unsigned int flags; ++ void (*input)(struct sk_buff *); ++ struct mutex *cb_mutex; ++ int (*bind)(struct net *, int); ++ void (*unbind)(struct net *, int); ++ bool (*compare)(struct net *, struct sock *); ++}; ++ ++struct audit_netlink_list { ++ __u32 portid; ++ struct net *net; ++ struct sk_buff_head q; ++}; ++ ++struct audit_net { ++ struct sock *sk; ++}; ++ ++struct auditd_connection { ++ struct pid *pid; ++ u32 portid; ++ struct net *net; ++ struct callback_head rcu; ++}; ++ ++struct audit_ctl_mutex { ++ struct mutex lock; ++ void *owner; ++}; ++ ++struct audit_buffer { ++ struct sk_buff *skb; ++ struct audit_context *ctx; ++ gfp_t gfp_mask; ++}; ++ ++struct audit_reply { ++ __u32 portid; ++ struct net *net; ++ struct sk_buff *skb; ++}; ++ ++enum { ++ Audit_equal = 0, ++ Audit_not_equal = 1, ++ Audit_bitmask = 2, ++ Audit_bittest = 3, ++ Audit_lt = 4, ++ Audit_gt = 5, ++ Audit_le = 6, ++ Audit_ge = 7, ++ Audit_bad = 8, ++}; ++ ++struct audit_rule_data { ++ __u32 flags; ++ __u32 action; ++ __u32 field_count; ++ __u32 mask[64]; ++ __u32 fields[64]; ++ __u32 values[64]; ++ __u32 fieldflags[64]; ++ __u32 buflen; ++ char buf[0]; ++}; ++ ++struct audit_field; ++ ++struct audit_watch; ++ ++struct audit_tree; ++ ++struct audit_fsnotify_mark; ++ ++struct audit_krule { ++ u32 pflags; ++ u32 flags; ++ u32 listnr; ++ u32 action; ++ u32 mask[64]; ++ u32 buflen; ++ u32 field_count; ++ char *filterkey; ++ struct audit_field *fields; ++ struct audit_field *arch_f; ++ struct audit_field *inode_f; ++ struct audit_watch *watch; ++ struct audit_tree *tree; ++ struct audit_fsnotify_mark *exe; ++ struct list_head rlist; ++ struct list_head list; ++ u64 prio; ++}; ++ ++struct audit_field { ++ u32 type; ++ union { ++ u32 val; ++ kuid_t uid; ++ kgid_t gid; ++ struct { ++ char *lsm_str; ++ void *lsm_rule; ++ }; ++ }; ++ u32 op; ++}; ++ ++struct audit_entry { ++ struct list_head list; ++ struct callback_head rcu; ++ struct audit_krule rule; ++}; ++ ++struct audit_buffer___2; ++ ++typedef int __kernel_key_t; ++ ++typedef __kernel_key_t key_t; ++ ++struct kern_ipc_perm { ++ spinlock_t lock; ++ bool deleted; ++ int id; ++ key_t key; ++ kuid_t uid; ++ kgid_t gid; ++ kuid_t cuid; ++ kgid_t cgid; ++ umode_t mode; ++ long unsigned int seq; ++ void *security; ++ struct rhash_head khtnode; ++ struct callback_head rcu; ++ refcount_t refcount; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; 
++ ++typedef struct fsnotify_mark_connector *fsnotify_connp_t; ++ ++struct fsnotify_mark_connector { ++ spinlock_t lock; ++ unsigned int type; ++ union { ++ fsnotify_connp_t *obj; ++ struct fsnotify_mark_connector *destroy_next; ++ }; ++ struct hlist_head list; ++}; ++ ++enum fsnotify_obj_type { ++ FSNOTIFY_OBJ_TYPE_INODE = 0, ++ FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, ++ FSNOTIFY_OBJ_TYPE_COUNT = 2, ++ FSNOTIFY_OBJ_TYPE_DETACHED = 2, ++}; ++ ++struct audit_aux_data { ++ struct audit_aux_data *next; ++ int type; ++}; ++ ++struct audit_chunk; ++ ++struct audit_tree_refs { ++ struct audit_tree_refs *next; ++ struct audit_chunk *c[31]; ++}; ++ ++struct audit_aux_data_pids { ++ struct audit_aux_data d; ++ pid_t target_pid[16]; ++ kuid_t target_auid[16]; ++ kuid_t target_uid[16]; ++ unsigned int target_sessionid[16]; ++ u32 target_sid[16]; ++ char target_comm[256]; ++ int pid_count; ++}; ++ ++struct audit_aux_data_bprm_fcaps { ++ struct audit_aux_data d; ++ struct audit_cap_data fcap; ++ unsigned int fcap_ver; ++ struct audit_cap_data old_pcap; ++ struct audit_cap_data new_pcap; ++}; ++ ++struct audit_parent; ++ ++struct audit_watch { ++ refcount_t count; ++ dev_t dev; ++ char *path; ++ long unsigned int ino; ++ struct audit_parent *parent; ++ struct list_head wlist; ++ struct list_head rules; ++}; ++ ++struct fsnotify_group; ++ ++struct fsnotify_iter_info; ++ ++struct fsnotify_mark; ++ ++struct fsnotify_event; ++ ++struct fsnotify_ops { ++ int (*handle_event)(struct fsnotify_group *, struct inode *, u32, const void *, int, const unsigned char *, u32, struct fsnotify_iter_info *); ++ void (*free_group_priv)(struct fsnotify_group *); ++ void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); ++ void (*free_event)(struct fsnotify_event *); ++ void (*free_mark)(struct fsnotify_mark *); ++}; ++ ++struct inotify_group_private_data { ++ spinlock_t idr_lock; ++ struct idr idr; ++ struct ucounts *ucounts; ++}; ++ ++struct fanotify_group_private_data { ++ struct list_head access_list; ++ wait_queue_head_t access_waitq; ++ int f_flags; ++ unsigned int max_marks; ++ struct user_struct *user; ++ bool audit; ++}; ++ ++struct fsnotify_group { ++ const struct fsnotify_ops *ops; ++ refcount_t refcnt; ++ spinlock_t notification_lock; ++ struct list_head notification_list; ++ wait_queue_head_t notification_waitq; ++ unsigned int q_len; ++ unsigned int max_events; ++ unsigned int priority; ++ bool shutdown; ++ struct mutex mark_mutex; ++ atomic_t num_marks; ++ atomic_t user_waits; ++ struct list_head marks_list; ++ struct fasync_struct *fsn_fa; ++ struct fsnotify_event *overflow_event; ++ struct mem_cgroup *memcg; ++ union { ++ void *private; ++ struct inotify_group_private_data inotify_data; ++ struct fanotify_group_private_data fanotify_data; ++ }; ++}; ++ ++struct fsnotify_iter_info { ++ struct fsnotify_mark *marks[2]; ++ unsigned int report_mask; ++ int srcu_idx; ++}; ++ ++struct fsnotify_mark { ++ __u32 mask; ++ refcount_t refcnt; ++ struct fsnotify_group *group; ++ struct list_head g_list; ++ spinlock_t lock; ++ struct hlist_node obj_list; ++ struct fsnotify_mark_connector *connector; ++ __u32 ignored_mask; ++ unsigned int flags; ++}; ++ ++struct fsnotify_event { ++ struct list_head list; ++ struct inode *inode; ++ u32 mask; ++}; ++ ++struct audit_parent { ++ struct list_head watches; ++ struct fsnotify_mark mark; ++}; ++ ++struct audit_fsnotify_mark { ++ dev_t dev; ++ long unsigned int ino; ++ char *path; ++ struct fsnotify_mark mark; ++ struct audit_krule *rule; ++}; ++ ++struct audit_chunk___2; ++ 
++struct audit_tree { ++ refcount_t count; ++ int goner; ++ struct audit_chunk___2 *root; ++ struct list_head chunks; ++ struct list_head rules; ++ struct list_head list; ++ struct list_head same_root; ++ struct callback_head head; ++ char pathname[0]; ++}; ++ ++struct node___2 { ++ struct list_head list; ++ struct audit_tree *owner; ++ unsigned int index; ++}; ++ ++struct audit_chunk___2 { ++ struct list_head hash; ++ long unsigned int key; ++ struct fsnotify_mark mark; ++ struct list_head trees; ++ int dead; ++ int count; ++ atomic_long_t refs; ++ struct callback_head head; ++ struct node___2 owners[0]; ++}; ++ ++enum { ++ HASH_SIZE = 128, ++}; ++ ++struct kprobe_blacklist_entry { ++ struct list_head list; ++ long unsigned int start_addr; ++ long unsigned int end_addr; ++}; ++ ++struct kprobe_insn_page { ++ struct list_head list; ++ kprobe_opcode_t *insns; ++ struct kprobe_insn_cache *cache; ++ int nused; ++ int ngarbage; ++ char slot_used[0]; ++}; ++ ++enum kprobe_slot_state { ++ SLOT_CLEAN = 0, ++ SLOT_DIRTY = 1, ++ SLOT_USED = 2, ++}; ++ ++enum { ++ KDB_NOT_INITIALIZED = 0, ++ KDB_INIT_EARLY = 1, ++ KDB_INIT_FULL = 2, ++}; ++ ++struct kgdb_state { ++ int ex_vector; ++ int signo; ++ int err_code; ++ int cpu; ++ int pass_exception; ++ long unsigned int thr_query; ++ long unsigned int threadid; ++ long int kgdb_usethreadid; ++ struct pt_regs *linux_regs; ++ atomic_t *send_ready; ++}; ++ ++struct debuggerinfo_struct { ++ void *debuggerinfo; ++ struct task_struct *task; ++ int exception_state; ++ int ret_state; ++ int irq_depth; ++ int enter_kgdb; ++}; ++ ++struct _kdb_bp { ++ long unsigned int bp_addr; ++ unsigned int bp_free: 1; ++ unsigned int bp_enabled: 1; ++ unsigned int bp_type: 4; ++ unsigned int bp_installed: 1; ++ unsigned int bp_delay: 1; ++ unsigned int bp_delayed: 1; ++ unsigned int bph_length; ++}; ++ ++typedef struct _kdb_bp kdb_bp_t; ++ ++typedef enum { ++ KDB_ENABLE_ALL = 1, ++ KDB_ENABLE_MEM_READ = 2, ++ KDB_ENABLE_MEM_WRITE = 4, ++ KDB_ENABLE_REG_READ = 8, ++ KDB_ENABLE_REG_WRITE = 16, ++ KDB_ENABLE_INSPECT = 32, ++ KDB_ENABLE_FLOW_CTRL = 64, ++ KDB_ENABLE_SIGNAL = 128, ++ KDB_ENABLE_REBOOT = 256, ++ KDB_ENABLE_ALWAYS_SAFE = 512, ++ KDB_ENABLE_MASK = 1023, ++ KDB_ENABLE_ALL_NO_ARGS = 1024, ++ KDB_ENABLE_MEM_READ_NO_ARGS = 2048, ++ KDB_ENABLE_MEM_WRITE_NO_ARGS = 4096, ++ KDB_ENABLE_REG_READ_NO_ARGS = 8192, ++ KDB_ENABLE_REG_WRITE_NO_ARGS = 16384, ++ KDB_ENABLE_INSPECT_NO_ARGS = 32768, ++ KDB_ENABLE_FLOW_CTRL_NO_ARGS = 65536, ++ KDB_ENABLE_SIGNAL_NO_ARGS = 131072, ++ KDB_ENABLE_REBOOT_NO_ARGS = 262144, ++ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = 524288, ++ KDB_ENABLE_MASK_NO_ARGS = 1047552, ++ KDB_REPEAT_NO_ARGS = 1073741824, ++ KDB_REPEAT_WITH_ARGS = 2147483648, ++} kdb_cmdflags_t; ++ ++typedef int (*kdb_func_t)(int, const char **); ++ ++typedef enum { ++ KDB_REASON_ENTER = 1, ++ KDB_REASON_ENTER_SLAVE = 2, ++ KDB_REASON_BREAK = 3, ++ KDB_REASON_DEBUG = 4, ++ KDB_REASON_OOPS = 5, ++ KDB_REASON_SWITCH = 6, ++ KDB_REASON_KEYBOARD = 7, ++ KDB_REASON_NMI = 8, ++ KDB_REASON_RECURSE = 9, ++ KDB_REASON_SSTEP = 10, ++ KDB_REASON_SYSTEM_NMI = 11, ++} kdb_reason_t; ++ ++struct __ksymtab { ++ long unsigned int value; ++ const char *mod_name; ++ long unsigned int mod_start; ++ long unsigned int mod_end; ++ const char *sec_name; ++ long unsigned int sec_start; ++ long unsigned int sec_end; ++ const char *sym_name; ++ long unsigned int sym_start; ++ long unsigned int sym_end; ++}; ++ ++typedef struct __ksymtab kdb_symtab_t; ++ ++struct _kdbtab { ++ char *cmd_name; ++ kdb_func_t cmd_func; ++ 
char *cmd_usage; ++ char *cmd_help; ++ short int cmd_minlen; ++ kdb_cmdflags_t cmd_flags; ++}; ++ ++typedef struct _kdbtab kdbtab_t; ++ ++typedef enum { ++ KDB_DB_BPT = 0, ++ KDB_DB_SS = 1, ++ KDB_DB_SSBPT = 2, ++ KDB_DB_NOBPT = 3, ++} kdb_dbtrap_t; ++ ++struct _kdbmsg { ++ int km_diag; ++ char *km_msg; ++}; ++ ++typedef struct _kdbmsg kdbmsg_t; ++ ++struct defcmd_set { ++ int count; ++ int usable; ++ char *name; ++ char *usage; ++ char *help; ++ char **command; ++}; ++ ++struct debug_alloc_header { ++ u32 next; ++ u32 size; ++ void *caller; ++}; ++ ++typedef short unsigned int u_short; ++ ++struct seccomp_filter { ++ refcount_t usage; ++ bool log; ++ struct seccomp_filter *prev; ++ struct bpf_prog *prog; ++}; ++ ++struct seccomp_metadata { ++ __u64 filter_off; ++ __u64 flags; ++}; ++ ++struct sock_fprog { ++ short unsigned int len; ++ struct sock_filter *filter; ++}; ++ ++struct compat_sock_fprog { ++ u16 len; ++ compat_uptr_t filter; ++}; ++ ++struct seccomp_log_name { ++ u32 log; ++ const char *name; ++}; ++ ++struct rchan; ++ ++struct rchan_buf { ++ void *start; ++ void *data; ++ size_t offset; ++ size_t subbufs_produced; ++ size_t subbufs_consumed; ++ struct rchan *chan; ++ wait_queue_head_t read_wait; ++ struct irq_work wakeup_work; ++ struct dentry *dentry; ++ struct kref kref; ++ struct page **page_array; ++ unsigned int page_count; ++ unsigned int finalized; ++ size_t *padding; ++ size_t prev_padding; ++ size_t bytes_consumed; ++ size_t early_bytes; ++ unsigned int cpu; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rchan_callbacks; ++ ++struct rchan { ++ u32 version; ++ size_t subbuf_size; ++ size_t n_subbufs; ++ size_t alloc_size; ++ struct rchan_callbacks *cb; ++ struct kref kref; ++ void *private_data; ++ size_t last_toobig; ++ struct rchan_buf **buf; ++ int is_global; ++ struct list_head list; ++ struct dentry *parent; ++ int has_base_filename; ++ char base_filename[255]; ++}; ++ ++struct rchan_callbacks { ++ int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); ++ void (*buf_mapped)(struct rchan_buf *, struct file *); ++ void (*buf_unmapped)(struct rchan_buf *, struct file *); ++ struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); ++ int (*remove_buf_file)(struct dentry *); ++}; ++ ++struct partial_page { ++ unsigned int offset; ++ unsigned int len; ++ long unsigned int private; ++}; ++ ++struct splice_pipe_desc { ++ struct page **pages; ++ struct partial_page *partial; ++ int nr_pages; ++ unsigned int nr_pages_max; ++ const struct pipe_buf_operations *ops; ++ void (*spd_release)(struct splice_pipe_desc *, unsigned int); ++}; ++ ++struct rchan_percpu_buf_dispatcher { ++ struct rchan_buf *buf; ++ struct dentry *dentry; ++}; ++ ++enum { ++ TASKSTATS_TYPE_UNSPEC = 0, ++ TASKSTATS_TYPE_PID = 1, ++ TASKSTATS_TYPE_TGID = 2, ++ TASKSTATS_TYPE_STATS = 3, ++ TASKSTATS_TYPE_AGGR_PID = 4, ++ TASKSTATS_TYPE_AGGR_TGID = 5, ++ TASKSTATS_TYPE_NULL = 6, ++ __TASKSTATS_TYPE_MAX = 7, ++}; ++ ++enum { ++ TASKSTATS_CMD_ATTR_UNSPEC = 0, ++ TASKSTATS_CMD_ATTR_PID = 1, ++ TASKSTATS_CMD_ATTR_TGID = 2, ++ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, ++ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, ++ __TASKSTATS_CMD_ATTR_MAX = 5, ++}; ++ ++enum { ++ CGROUPSTATS_CMD_UNSPEC = 3, ++ CGROUPSTATS_CMD_GET = 4, ++ CGROUPSTATS_CMD_NEW = 5, ++ __CGROUPSTATS_CMD_MAX = 6, ++}; ++ ++enum { ++ CGROUPSTATS_TYPE_UNSPEC = 0, ++ CGROUPSTATS_TYPE_CGROUP_STATS = 1, ++ __CGROUPSTATS_TYPE_MAX = 2, ++}; ++ ++enum { ++ CGROUPSTATS_CMD_ATTR_UNSPEC 
= 0, ++ CGROUPSTATS_CMD_ATTR_FD = 1, ++ __CGROUPSTATS_CMD_ATTR_MAX = 2, ++}; ++ ++struct listener { ++ struct list_head list; ++ pid_t pid; ++ char valid; ++}; ++ ++struct listener_list { ++ struct rw_semaphore sem; ++ struct list_head list; ++}; ++ ++enum actions { ++ REGISTER = 0, ++ DEREGISTER = 1, ++ CPU_DONT_CARE = 2, ++}; ++ ++struct tp_module { ++ struct list_head list; ++ struct module *mod; ++}; ++ ++struct tp_probes { ++ struct callback_head rcu; ++ struct tracepoint_func probes[0]; ++}; ++ ++enum { ++ FTRACE_OPS_FL_ENABLED = 1, ++ FTRACE_OPS_FL_DYNAMIC = 2, ++ FTRACE_OPS_FL_SAVE_REGS = 4, ++ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, ++ FTRACE_OPS_FL_RECURSION_SAFE = 16, ++ FTRACE_OPS_FL_STUB = 32, ++ FTRACE_OPS_FL_INITIALIZED = 64, ++ FTRACE_OPS_FL_DELETED = 128, ++ FTRACE_OPS_FL_ADDING = 256, ++ FTRACE_OPS_FL_REMOVING = 512, ++ FTRACE_OPS_FL_MODIFYING = 1024, ++ FTRACE_OPS_FL_ALLOC_TRAMP = 2048, ++ FTRACE_OPS_FL_IPMODIFY = 4096, ++ FTRACE_OPS_FL_PID = 8192, ++ FTRACE_OPS_FL_RCU = 16384, ++ FTRACE_OPS_FL_TRACE_ARRAY = 32768, ++}; ++ ++struct ftrace_hash { ++ long unsigned int size_bits; ++ struct hlist_head *buckets; ++ long unsigned int count; ++ long unsigned int flags; ++ struct callback_head rcu; ++}; ++ ++enum { ++ FTRACE_FL_ENABLED = 2147483648, ++ FTRACE_FL_REGS = 1073741824, ++ FTRACE_FL_REGS_EN = 536870912, ++ FTRACE_FL_TRAMP = 268435456, ++ FTRACE_FL_TRAMP_EN = 134217728, ++ FTRACE_FL_IPMODIFY = 67108864, ++ FTRACE_FL_DISABLED = 33554432, ++}; ++ ++enum { ++ FTRACE_UPDATE_CALLS = 1, ++ FTRACE_DISABLE_CALLS = 2, ++ FTRACE_UPDATE_TRACE_FUNC = 4, ++ FTRACE_START_FUNC_RET = 8, ++ FTRACE_STOP_FUNC_RET = 16, ++}; ++ ++enum { ++ FTRACE_UPDATE_IGNORE = 0, ++ FTRACE_UPDATE_MAKE_CALL = 1, ++ FTRACE_UPDATE_MODIFY_CALL = 2, ++ FTRACE_UPDATE_MAKE_NOP = 3, ++}; ++ ++enum { ++ FTRACE_ITER_FILTER = 1, ++ FTRACE_ITER_NOTRACE = 2, ++ FTRACE_ITER_PRINTALL = 4, ++ FTRACE_ITER_DO_PROBES = 8, ++ FTRACE_ITER_PROBE = 16, ++ FTRACE_ITER_MOD = 32, ++ FTRACE_ITER_ENABLED = 64, ++}; ++ ++struct prog_entry; ++ ++struct event_filter { ++ struct prog_entry *prog; ++ char *filter_string; ++}; ++ ++struct trace_array_cpu; ++ ++struct trace_buffer { ++ struct trace_array *tr; ++ struct ring_buffer *buffer; ++ struct trace_array_cpu *data; ++ u64 time_start; ++ int cpu; ++}; ++ ++struct trace_pid_list; ++ ++struct trace_options; ++ ++struct trace_array { ++ struct list_head list; ++ char *name; ++ struct trace_buffer trace_buffer; ++ struct trace_buffer max_buffer; ++ bool allocated_snapshot; ++ long unsigned int max_latency; ++ struct trace_pid_list *filtered_pids; ++ arch_spinlock_t max_lock; ++ int buffer_disabled; ++ int sys_refcount_enter; ++ int sys_refcount_exit; ++ struct trace_event_file *enter_syscall_files[294]; ++ struct trace_event_file *exit_syscall_files[294]; ++ int stop_count; ++ int clock_id; ++ int nr_topts; ++ bool clear_trace; ++ struct tracer *current_trace; ++ unsigned int trace_flags; ++ unsigned char trace_flags_index[32]; ++ unsigned int flags; ++ raw_spinlock_t start_lock; ++ struct dentry *dir; ++ struct dentry *options; ++ struct dentry *percpu_dir; ++ struct dentry *event_dir; ++ struct trace_options *topts; ++ struct list_head systems; ++ struct list_head events; ++ struct trace_event_file *trace_marker_file; ++ cpumask_var_t tracing_cpumask; ++ int ref; ++ struct ftrace_ops *ops; ++ struct trace_pid_list *function_pids; ++ struct list_head func_probes; ++ struct list_head mod_trace; ++ struct list_head mod_notrace; ++ int function_enabled; ++ int time_stamp_abs_ref; ++ 
struct list_head hist_vars; ++}; ++ ++struct tracer_flags; ++ ++struct tracer { ++ const char *name; ++ int (*init)(struct trace_array *); ++ void (*reset)(struct trace_array *); ++ void (*start)(struct trace_array *); ++ void (*stop)(struct trace_array *); ++ int (*update_thresh)(struct trace_array *); ++ void (*open)(struct trace_iterator *); ++ void (*pipe_open)(struct trace_iterator *); ++ void (*close)(struct trace_iterator *); ++ void (*pipe_close)(struct trace_iterator *); ++ ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); ++ ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); ++ void (*print_header)(struct seq_file *); ++ enum print_line_t (*print_line)(struct trace_iterator *); ++ int (*set_flag)(struct trace_array *, u32, u32, int); ++ int (*flag_changed)(struct trace_array *, u32, int); ++ struct tracer *next; ++ struct tracer_flags *flags; ++ int enabled; ++ int ref; ++ bool print_max; ++ bool allow_instances; ++ bool use_max_tr; ++ bool noboot; ++}; ++ ++struct event_subsystem; ++ ++struct trace_subsystem_dir { ++ struct list_head list; ++ struct event_subsystem *subsystem; ++ struct trace_array *tr; ++ struct dentry *entry; ++ int ref_count; ++ int nr_events; ++}; ++ ++struct trace_array_cpu { ++ atomic_t disabled; ++ void *buffer_page; ++ long unsigned int entries; ++ long unsigned int saved_latency; ++ long unsigned int critical_start; ++ long unsigned int critical_end; ++ long unsigned int critical_sequence; ++ long unsigned int nice; ++ long unsigned int policy; ++ long unsigned int rt_priority; ++ long unsigned int skipped_entries; ++ u64 preempt_timestamp; ++ pid_t pid; ++ kuid_t uid; ++ char comm[16]; ++ bool ignore_pid; ++ bool ftrace_ignore_pid; ++}; ++ ++struct trace_option_dentry; ++ ++struct trace_options { ++ struct tracer *tracer; ++ struct trace_option_dentry *topts; ++}; ++ ++struct tracer_opt; ++ ++struct trace_option_dentry { ++ struct tracer_opt *opt; ++ struct tracer_flags *flags; ++ struct trace_array *tr; ++ struct dentry *entry; ++}; ++ ++struct trace_pid_list { ++ int pid_max; ++ long unsigned int *pids; ++}; ++ ++enum { ++ TRACE_ARRAY_FL_GLOBAL = 1, ++}; ++ ++struct tracer_opt { ++ const char *name; ++ u32 bit; ++}; ++ ++struct tracer_flags { ++ u32 val; ++ struct tracer_opt *opts; ++ struct tracer *trace; ++}; ++ ++enum { ++ TRACE_BUFFER_BIT = 0, ++ TRACE_BUFFER_NMI_BIT = 1, ++ TRACE_BUFFER_IRQ_BIT = 2, ++ TRACE_BUFFER_SIRQ_BIT = 3, ++ TRACE_FTRACE_BIT = 4, ++ TRACE_FTRACE_NMI_BIT = 5, ++ TRACE_FTRACE_IRQ_BIT = 6, ++ TRACE_FTRACE_SIRQ_BIT = 7, ++ TRACE_INTERNAL_BIT = 8, ++ TRACE_INTERNAL_NMI_BIT = 9, ++ TRACE_INTERNAL_IRQ_BIT = 10, ++ TRACE_INTERNAL_SIRQ_BIT = 11, ++ TRACE_BRANCH_BIT = 12, ++ TRACE_IRQ_BIT = 13, ++ TRACE_GRAPH_BIT = 14, ++ TRACE_GRAPH_DEPTH_START_BIT = 15, ++ TRACE_GRAPH_DEPTH_END_BIT = 16, ++}; ++ ++struct ftrace_mod_load { ++ struct list_head list; ++ char *func; ++ char *module; ++ int enable; ++}; ++ ++enum { ++ FTRACE_HASH_FL_MOD = 1, ++}; ++ ++struct ftrace_func_command { ++ struct list_head list; ++ char *name; ++ int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); ++}; ++ ++struct ftrace_probe_ops { ++ void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); ++ int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); ++ void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned 
int, void *); ++ int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); ++}; ++ ++typedef int (*ftrace_mapper_func)(void *); ++ ++struct trace_parser { ++ bool cont; ++ char *buffer; ++ unsigned int idx; ++ unsigned int size; ++}; ++ ++enum trace_iterator_bits { ++ TRACE_ITER_PRINT_PARENT_BIT = 0, ++ TRACE_ITER_SYM_OFFSET_BIT = 1, ++ TRACE_ITER_SYM_ADDR_BIT = 2, ++ TRACE_ITER_VERBOSE_BIT = 3, ++ TRACE_ITER_RAW_BIT = 4, ++ TRACE_ITER_HEX_BIT = 5, ++ TRACE_ITER_BIN_BIT = 6, ++ TRACE_ITER_BLOCK_BIT = 7, ++ TRACE_ITER_PRINTK_BIT = 8, ++ TRACE_ITER_ANNOTATE_BIT = 9, ++ TRACE_ITER_USERSTACKTRACE_BIT = 10, ++ TRACE_ITER_SYM_USEROBJ_BIT = 11, ++ TRACE_ITER_PRINTK_MSGONLY_BIT = 12, ++ TRACE_ITER_CONTEXT_INFO_BIT = 13, ++ TRACE_ITER_LATENCY_FMT_BIT = 14, ++ TRACE_ITER_RECORD_CMD_BIT = 15, ++ TRACE_ITER_RECORD_TGID_BIT = 16, ++ TRACE_ITER_OVERWRITE_BIT = 17, ++ TRACE_ITER_STOP_ON_FREE_BIT = 18, ++ TRACE_ITER_IRQ_INFO_BIT = 19, ++ TRACE_ITER_MARKERS_BIT = 20, ++ TRACE_ITER_EVENT_FORK_BIT = 21, ++ TRACE_ITER_FUNCTION_BIT = 22, ++ TRACE_ITER_FUNC_FORK_BIT = 23, ++ TRACE_ITER_DISPLAY_GRAPH_BIT = 24, ++ TRACE_ITER_STACKTRACE_BIT = 25, ++ TRACE_ITER_LAST_BIT = 26, ++}; ++ ++struct event_subsystem { ++ struct list_head list; ++ const char *name; ++ struct event_filter *filter; ++ int ref_count; ++}; ++ ++enum regex_type { ++ MATCH_FULL = 0, ++ MATCH_FRONT_ONLY = 1, ++ MATCH_MIDDLE_ONLY = 2, ++ MATCH_END_ONLY = 3, ++ MATCH_GLOB = 4, ++}; ++ ++struct ftrace_func_entry { ++ struct hlist_node hlist; ++ long unsigned int ip; ++}; ++ ++struct ftrace_func_probe { ++ struct ftrace_probe_ops *probe_ops; ++ struct ftrace_ops ops; ++ struct trace_array *tr; ++ struct list_head list; ++ void *data; ++ int ref; ++}; ++ ++struct ftrace_page { ++ struct ftrace_page *next; ++ struct dyn_ftrace *records; ++ int index; ++ int size; ++}; ++ ++struct ftrace_rec_iter { ++ struct ftrace_page *pg; ++ int index; ++}; ++ ++struct ftrace_iterator { ++ loff_t pos; ++ loff_t func_pos; ++ loff_t mod_pos; ++ struct ftrace_page *pg; ++ struct dyn_ftrace *func; ++ struct ftrace_func_probe *probe; ++ struct ftrace_func_entry *probe_entry; ++ struct trace_parser parser; ++ struct ftrace_hash *hash; ++ struct ftrace_ops *ops; ++ struct trace_array *tr; ++ struct list_head *mod_list; ++ int pidx; ++ int idx; ++ unsigned int flags; ++}; ++ ++struct ftrace_glob { ++ char *search; ++ unsigned int len; ++ int type; ++}; ++ ++struct ftrace_func_map { ++ struct ftrace_func_entry entry; ++ void *data; ++}; ++ ++struct ftrace_func_mapper { ++ struct ftrace_hash hash; ++}; ++ ++enum graph_filter_type { ++ GRAPH_FILTER_NOTRACE = 0, ++ GRAPH_FILTER_FUNCTION = 1, ++}; ++ ++struct ftrace_graph_data { ++ struct ftrace_hash *hash; ++ struct ftrace_func_entry *entry; ++ int idx; ++ enum graph_filter_type type; ++ struct ftrace_hash *new_hash; ++ const struct seq_operations *seq_ops; ++ struct trace_parser parser; ++}; ++ ++struct ftrace_mod_func { ++ struct list_head list; ++ char *name; ++ long unsigned int ip; ++ unsigned int size; ++}; ++ ++struct ftrace_mod_map { ++ struct callback_head rcu; ++ struct list_head list; ++ struct module *mod; ++ long unsigned int start_addr; ++ long unsigned int end_addr; ++ struct list_head funcs; ++ unsigned int num_funcs; ++}; ++ ++struct ftrace_init_func { ++ struct list_head list; ++ long unsigned int ip; ++}; ++ ++enum ring_buffer_type { ++ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, ++ RINGBUF_TYPE_PADDING = 29, ++ RINGBUF_TYPE_TIME_EXTEND = 30, ++ RINGBUF_TYPE_TIME_STAMP = 31, ++}; ++ ++enum 
ring_buffer_flags { ++ RB_FL_OVERWRITE = 1, ++}; ++ ++struct rb_irq_work { ++ struct irq_work work; ++ wait_queue_head_t waiters; ++ wait_queue_head_t full_waiters; ++ bool waiters_pending; ++ bool full_waiters_pending; ++ bool wakeup_full; ++}; ++ ++struct ring_buffer_per_cpu; ++ ++struct ring_buffer { ++ unsigned int flags; ++ int cpus; ++ atomic_t record_disabled; ++ atomic_t resize_disabled; ++ cpumask_var_t cpumask; ++ struct lock_class_key *reader_lock_key; ++ struct mutex mutex; ++ struct ring_buffer_per_cpu **buffers; ++ struct hlist_node node; ++ u64 (*clock)(); ++ struct rb_irq_work irq_work; ++ bool time_stamp_abs; ++}; ++ ++struct buffer_page; ++ ++struct ring_buffer_iter { ++ struct ring_buffer_per_cpu *cpu_buffer; ++ long unsigned int head; ++ struct buffer_page *head_page; ++ struct buffer_page *cache_reader_page; ++ long unsigned int cache_read; ++ u64 read_stamp; ++}; ++ ++enum { ++ RB_LEN_TIME_EXTEND = 8, ++ RB_LEN_TIME_STAMP = 8, ++}; ++ ++struct buffer_data_page { ++ u64 time_stamp; ++ local_t commit; ++ unsigned char data[0]; ++}; ++ ++struct buffer_page { ++ struct list_head list; ++ local_t write; ++ unsigned int read; ++ local_t entries; ++ long unsigned int real_end; ++ struct buffer_data_page *page; ++}; ++ ++struct rb_event_info { ++ u64 ts; ++ u64 delta; ++ long unsigned int length; ++ struct buffer_page *tail_page; ++ int add_timestamp; ++}; ++ ++enum { ++ RB_CTX_NMI = 0, ++ RB_CTX_IRQ = 1, ++ RB_CTX_SOFTIRQ = 2, ++ RB_CTX_NORMAL = 3, ++ RB_CTX_MAX = 4, ++}; ++ ++struct ring_buffer_per_cpu { ++ int cpu; ++ atomic_t record_disabled; ++ struct ring_buffer *buffer; ++ raw_spinlock_t reader_lock; ++ arch_spinlock_t lock; ++ struct lock_class_key lock_key; ++ struct buffer_data_page *free_page; ++ long unsigned int nr_pages; ++ unsigned int current_context; ++ struct list_head *pages; ++ struct buffer_page *head_page; ++ struct buffer_page *tail_page; ++ struct buffer_page *commit_page; ++ struct buffer_page *reader_page; ++ long unsigned int lost_events; ++ long unsigned int last_overrun; ++ long unsigned int nest; ++ local_t entries_bytes; ++ local_t entries; ++ local_t overrun; ++ local_t commit_overrun; ++ local_t dropped_events; ++ local_t committing; ++ local_t commits; ++ long unsigned int read; ++ long unsigned int read_bytes; ++ u64 write_stamp; ++ u64 read_stamp; ++ long int nr_pages_to_update; ++ struct list_head new_pages; ++ struct work_struct update_pages_work; ++ struct completion update_done; ++ struct rb_irq_work irq_work; ++}; ++ ++struct trace_export { ++ struct trace_export *next; ++ void (*write)(struct trace_export *, const void *, unsigned int); ++}; ++ ++enum trace_iter_flags { ++ TRACE_FILE_LAT_FMT = 1, ++ TRACE_FILE_ANNOTATE = 2, ++ TRACE_FILE_TIME_IN_NS = 4, ++}; ++ ++enum event_trigger_type { ++ ETT_NONE = 0, ++ ETT_TRACE_ONOFF = 1, ++ ETT_SNAPSHOT = 2, ++ ETT_STACKTRACE = 4, ++ ETT_EVENT_ENABLE = 8, ++ ETT_EVENT_HIST = 16, ++ ETT_HIST_ENABLE = 32, ++}; ++ ++enum trace_type { ++ __TRACE_FIRST_TYPE = 0, ++ TRACE_FN = 1, ++ TRACE_CTX = 2, ++ TRACE_WAKE = 3, ++ TRACE_STACK = 4, ++ TRACE_PRINT = 5, ++ TRACE_BPRINT = 6, ++ TRACE_MMIO_RW = 7, ++ TRACE_MMIO_MAP = 8, ++ TRACE_BRANCH = 9, ++ TRACE_GRAPH_RET = 10, ++ TRACE_GRAPH_ENT = 11, ++ TRACE_USER_STACK = 12, ++ TRACE_BLK = 13, ++ TRACE_BPUTS = 14, ++ TRACE_HWLAT = 15, ++ TRACE_RAW_DATA = 16, ++ __TRACE_LAST_TYPE = 17, ++}; ++ ++struct ftrace_entry { ++ struct trace_entry ent; ++ long unsigned int ip; ++ long unsigned int parent_ip; ++}; ++ ++struct stack_entry { ++ struct trace_entry ent; ++ 
int size; ++ long unsigned int caller[8]; ++}; ++ ++struct userstack_entry { ++ struct trace_entry ent; ++ unsigned int tgid; ++ long unsigned int caller[8]; ++}; ++ ++struct bprint_entry { ++ struct trace_entry ent; ++ long unsigned int ip; ++ const char *fmt; ++ u32 buf[0]; ++}; ++ ++struct print_entry { ++ struct trace_entry ent; ++ long unsigned int ip; ++ char buf[0]; ++}; ++ ++struct raw_data_entry { ++ struct trace_entry ent; ++ unsigned int id; ++ char buf[0]; ++}; ++ ++struct bputs_entry { ++ struct trace_entry ent; ++ long unsigned int ip; ++ const char *str; ++}; ++ ++enum trace_flag_type { ++ TRACE_FLAG_IRQS_OFF = 1, ++ TRACE_FLAG_IRQS_NOSUPPORT = 2, ++ TRACE_FLAG_NEED_RESCHED = 4, ++ TRACE_FLAG_HARDIRQ = 8, ++ TRACE_FLAG_SOFTIRQ = 16, ++ TRACE_FLAG_PREEMPT_RESCHED = 32, ++ TRACE_FLAG_NMI = 64, ++}; ++ ++enum trace_iterator_flags { ++ TRACE_ITER_PRINT_PARENT = 1, ++ TRACE_ITER_SYM_OFFSET = 2, ++ TRACE_ITER_SYM_ADDR = 4, ++ TRACE_ITER_VERBOSE = 8, ++ TRACE_ITER_RAW = 16, ++ TRACE_ITER_HEX = 32, ++ TRACE_ITER_BIN = 64, ++ TRACE_ITER_BLOCK = 128, ++ TRACE_ITER_PRINTK = 256, ++ TRACE_ITER_ANNOTATE = 512, ++ TRACE_ITER_USERSTACKTRACE = 1024, ++ TRACE_ITER_SYM_USEROBJ = 2048, ++ TRACE_ITER_PRINTK_MSGONLY = 4096, ++ TRACE_ITER_CONTEXT_INFO = 8192, ++ TRACE_ITER_LATENCY_FMT = 16384, ++ TRACE_ITER_RECORD_CMD = 32768, ++ TRACE_ITER_RECORD_TGID = 65536, ++ TRACE_ITER_OVERWRITE = 131072, ++ TRACE_ITER_STOP_ON_FREE = 262144, ++ TRACE_ITER_IRQ_INFO = 524288, ++ TRACE_ITER_MARKERS = 1048576, ++ TRACE_ITER_EVENT_FORK = 2097152, ++ TRACE_ITER_FUNCTION = 4194304, ++ TRACE_ITER_FUNC_FORK = 8388608, ++ TRACE_ITER_DISPLAY_GRAPH = 16777216, ++ TRACE_ITER_STACKTRACE = 33554432, ++}; ++ ++struct saved_cmdlines_buffer { ++ unsigned int map_pid_to_cmdline[32769]; ++ unsigned int *map_cmdline_to_pid; ++ unsigned int cmdline_num; ++ int cmdline_idx; ++ char *saved_cmdlines; ++}; ++ ++struct ftrace_stack { ++ long unsigned int calls[8192]; ++}; ++ ++struct trace_buffer_struct { ++ int nesting; ++ char buffer[4096]; ++}; ++ ++struct ftrace_buffer_info { ++ struct trace_iterator iter; ++ void *spare; ++ unsigned int spare_cpu; ++ unsigned int read; ++}; ++ ++struct buffer_ref { ++ struct ring_buffer *buffer; ++ void *page; ++ int cpu; ++ refcount_t refcount; ++}; ++ ++struct ftrace_func_mapper___2; ++ ++struct ctx_switch_entry { ++ struct trace_entry ent; ++ unsigned int prev_pid; ++ unsigned int next_pid; ++ unsigned int next_cpu; ++ unsigned char prev_prio; ++ unsigned char prev_state; ++ unsigned char next_prio; ++ unsigned char next_state; ++}; ++ ++struct hwlat_entry { ++ struct trace_entry ent; ++ u64 duration; ++ u64 outer_duration; ++ u64 nmi_total_ts; ++ struct timespec64 timestamp; ++ unsigned int nmi_count; ++ unsigned int seqnum; ++}; ++ ++struct trace_mark { ++ long long unsigned int val; ++ char sym; ++}; ++ ++struct tracer_stat { ++ const char *name; ++ void * (*stat_start)(struct tracer_stat *); ++ void * (*stat_next)(void *, int); ++ int (*stat_cmp)(void *, void *); ++ int (*stat_show)(struct seq_file *, void *); ++ void (*stat_release)(void *); ++ int (*stat_headers)(struct seq_file *); ++}; ++ ++struct stat_node { ++ struct rb_node node; ++ void *stat; ++}; ++ ++struct stat_session { ++ struct list_head session_list; ++ struct tracer_stat *ts; ++ struct rb_root stat_root; ++ struct mutex stat_mutex; ++ struct dentry *file; ++}; ++ ++typedef int (*cmp_stat_t)(void *, void *); ++ ++struct trace_bprintk_fmt { ++ struct list_head list; ++ const char *fmt; ++}; ++ ++typedef int 
(*tracing_map_cmp_fn_t)(void *, void *); ++ ++struct tracing_map_field { ++ tracing_map_cmp_fn_t cmp_fn; ++ union { ++ atomic64_t sum; ++ unsigned int offset; ++ }; ++}; ++ ++struct tracing_map; ++ ++struct tracing_map_elt { ++ struct tracing_map *map; ++ struct tracing_map_field *fields; ++ atomic64_t *vars; ++ bool *var_set; ++ void *key; ++ void *private_data; ++}; ++ ++struct tracing_map_sort_key { ++ unsigned int field_idx; ++ bool descending; ++}; ++ ++struct tracing_map_array; ++ ++struct tracing_map_ops; ++ ++struct tracing_map { ++ unsigned int key_size; ++ unsigned int map_bits; ++ unsigned int map_size; ++ unsigned int max_elts; ++ atomic_t next_elt; ++ struct tracing_map_array *elts; ++ struct tracing_map_array *map; ++ const struct tracing_map_ops *ops; ++ void *private_data; ++ struct tracing_map_field fields[6]; ++ unsigned int n_fields; ++ int key_idx[3]; ++ unsigned int n_keys; ++ struct tracing_map_sort_key sort_key; ++ unsigned int n_vars; ++ atomic64_t hits; ++ atomic64_t drops; ++}; ++ ++struct tracing_map_entry { ++ u32 key; ++ struct tracing_map_elt *val; ++}; ++ ++struct tracing_map_sort_entry { ++ void *key; ++ struct tracing_map_elt *elt; ++ bool elt_copied; ++ bool dup; ++}; ++ ++struct tracing_map_array { ++ unsigned int entries_per_page; ++ unsigned int entry_size_shift; ++ unsigned int entry_shift; ++ unsigned int entry_mask; ++ unsigned int n_pages; ++ void **pages; ++}; ++ ++struct tracing_map_ops { ++ int (*elt_alloc)(struct tracing_map_elt *); ++ void (*elt_free)(struct tracing_map_elt *); ++ void (*elt_clear)(struct tracing_map_elt *); ++ void (*elt_init)(struct tracing_map_elt *); ++}; ++ ++enum { ++ TRACE_FUNC_OPT_STACK = 1, ++}; ++ ++struct hwlat_sample { ++ u64 seqnum; ++ u64 duration; ++ u64 outer_duration; ++ u64 nmi_total_ts; ++ struct timespec64 timestamp; ++ int nmi_count; ++}; ++ ++struct hwlat_data { ++ struct mutex lock; ++ u64 count; ++ u64 sample_window; ++ u64 sample_width; ++}; ++ ++enum { ++ TRACE_NOP_OPT_ACCEPT = 1, ++ TRACE_NOP_OPT_REFUSE = 2, ++}; ++ ++struct ftrace_graph_ent_entry { ++ struct trace_entry ent; ++ struct ftrace_graph_ent graph_ent; ++} __attribute__((packed)); ++ ++struct ftrace_graph_ret_entry { ++ struct trace_entry ent; ++ struct ftrace_graph_ret ret; ++} __attribute__((packed)); ++ ++struct fgraph_cpu_data { ++ pid_t last_pid; ++ int depth; ++ int depth_irq; ++ int ignore; ++ long unsigned int enter_funcs[50]; ++}; ++ ++struct fgraph_data { ++ struct fgraph_cpu_data *cpu_data; ++ struct ftrace_graph_ent_entry ent; ++ struct ftrace_graph_ret_entry ret; ++ int failed; ++ int cpu; ++} __attribute__((packed)); ++ ++enum { ++ FLAGS_FILL_FULL = 268435456, ++ FLAGS_FILL_START = 536870912, ++ FLAGS_FILL_END = 805306368, ++}; ++ ++typedef __u32 blk_mq_req_flags_t; ++ ++struct blk_mq_ctxs; ++ ++struct blk_mq_ctx { ++ struct { ++ spinlock_t lock; ++ struct list_head rq_list; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ }; ++ unsigned int cpu; ++ unsigned int index_hw; ++ long unsigned int rq_dispatched[2]; ++ long unsigned int rq_merged; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long unsigned int rq_completed[2]; ++ struct request_queue *queue; ++ struct blk_mq_ctxs *ctxs; ++ struct kobject kobj; ++}; ++ ++struct sbitmap_word; ++ ++struct sbitmap { ++ unsigned int depth; ++ unsigned int shift; ++ unsigned int map_nr; ++ struct sbitmap_word *map; ++}; ++ ++struct blk_mq_tags; ++ ++struct blk_mq_hw_ctx { ++ struct { ++ spinlock_t lock; ++ struct list_head dispatch; ++ long unsigned int state; 
++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ }; ++ struct delayed_work run_work; ++ cpumask_var_t cpumask; ++ int next_cpu; ++ int next_cpu_batch; ++ long unsigned int flags; ++ void *sched_data; ++ struct request_queue *queue; ++ struct blk_flush_queue *fq; ++ void *driver_data; ++ struct sbitmap ctx_map; ++ struct blk_mq_ctx *dispatch_from; ++ unsigned int dispatch_busy; ++ unsigned int nr_ctx; ++ struct blk_mq_ctx **ctxs; ++ spinlock_t dispatch_wait_lock; ++ wait_queue_entry_t dispatch_wait; ++ atomic_t wait_index; ++ struct blk_mq_tags *tags; ++ struct blk_mq_tags *sched_tags; ++ long unsigned int queued; ++ long unsigned int run; ++ long unsigned int dispatched[7]; ++ unsigned int numa_node; ++ unsigned int queue_num; ++ atomic_t nr_active; ++ unsigned int nr_expired; ++ struct hlist_node cpuhp_dead; ++ struct kobject kobj; ++ long unsigned int poll_considered; ++ long unsigned int poll_invoked; ++ long unsigned int poll_success; ++ struct dentry *debugfs_dir; ++ struct dentry *sched_debugfs_dir; ++ struct list_head hctx_list; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ struct srcu_struct srcu[0]; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct blk_mq_alloc_data { ++ struct request_queue *q; ++ blk_mq_req_flags_t flags; ++ unsigned int shallow_depth; ++ struct blk_mq_ctx *ctx; ++ struct blk_mq_hw_ctx *hctx; ++}; ++ ++struct blk_stat_callback { ++ struct list_head list; ++ struct timer_list timer; ++ struct blk_rq_stat *cpu_stat; ++ int (*bucket_fn)(const struct request *); ++ unsigned int buckets; ++ struct blk_rq_stat *stat; ++ void (*timer_fn)(struct blk_stat_callback *); ++ void *data; ++ struct callback_head rcu; ++}; ++ ++struct blk_trace { ++ int trace_state; ++ struct rchan *rchan; ++ long unsigned int *sequence; ++ unsigned char *msg_data; ++ u16 act_mask; ++ u64 start_lba; ++ u64 end_lba; ++ u32 pid; ++ u32 dev; ++ struct dentry *dir; ++ struct dentry *dropped_file; ++ struct dentry *msg_file; ++ struct list_head running_list; ++ atomic_t dropped; ++}; ++ ++struct blk_flush_queue { ++ unsigned int flush_queue_delayed: 1; ++ unsigned int flush_pending_idx: 1; ++ unsigned int flush_running_idx: 1; ++ blk_status_t rq_status; ++ long unsigned int flush_pending_since; ++ struct list_head flush_queue[2]; ++ struct list_head flush_data_in_flight; ++ struct request *flush_rq; ++ struct request *orig_rq; ++ spinlock_t mq_flush_lock; ++}; ++ ++struct blk_mq_tag_set { ++ unsigned int *mq_map; ++ const struct blk_mq_ops *ops; ++ unsigned int nr_hw_queues; ++ unsigned int queue_depth; ++ unsigned int reserved_tags; ++ unsigned int cmd_size; ++ int numa_node; ++ unsigned int timeout; ++ unsigned int flags; ++ void *driver_data; ++ struct blk_mq_tags **tags; ++ struct mutex tag_list_lock; ++ struct list_head tag_list; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++enum blktrace_cat { ++ BLK_TC_READ = 1, ++ BLK_TC_WRITE = 2, ++ BLK_TC_FLUSH = 4, ++ BLK_TC_SYNC = 8, ++ BLK_TC_SYNCIO = 8, ++ BLK_TC_QUEUE = 16, ++ BLK_TC_REQUEUE = 32, ++ BLK_TC_ISSUE = 64, ++ 
BLK_TC_COMPLETE = 128, ++ BLK_TC_FS = 256, ++ BLK_TC_PC = 512, ++ BLK_TC_NOTIFY = 1024, ++ BLK_TC_AHEAD = 2048, ++ BLK_TC_META = 4096, ++ BLK_TC_DISCARD = 8192, ++ BLK_TC_DRV_DATA = 16384, ++ BLK_TC_FUA = 32768, ++ BLK_TC_END = 32768, ++}; ++ ++enum blktrace_act { ++ __BLK_TA_QUEUE = 1, ++ __BLK_TA_BACKMERGE = 2, ++ __BLK_TA_FRONTMERGE = 3, ++ __BLK_TA_GETRQ = 4, ++ __BLK_TA_SLEEPRQ = 5, ++ __BLK_TA_REQUEUE = 6, ++ __BLK_TA_ISSUE = 7, ++ __BLK_TA_COMPLETE = 8, ++ __BLK_TA_PLUG = 9, ++ __BLK_TA_UNPLUG_IO = 10, ++ __BLK_TA_UNPLUG_TIMER = 11, ++ __BLK_TA_INSERT = 12, ++ __BLK_TA_SPLIT = 13, ++ __BLK_TA_BOUNCE = 14, ++ __BLK_TA_REMAP = 15, ++ __BLK_TA_ABORT = 16, ++ __BLK_TA_DRV_DATA = 17, ++ __BLK_TA_CGROUP = 256, ++}; ++ ++enum blktrace_notify { ++ __BLK_TN_PROCESS = 0, ++ __BLK_TN_TIMESTAMP = 1, ++ __BLK_TN_MESSAGE = 2, ++ __BLK_TN_CGROUP = 256, ++}; ++ ++struct blk_io_trace { ++ __u32 magic; ++ __u32 sequence; ++ __u64 time; ++ __u64 sector; ++ __u32 bytes; ++ __u32 action; ++ __u32 pid; ++ __u32 device; ++ __u32 cpu; ++ __u16 error; ++ __u16 pdu_len; ++}; ++ ++struct blk_io_trace_remap { ++ __be32 device_from; ++ __be32 device_to; ++ __be64 sector_from; ++}; ++ ++enum { ++ Blktrace_setup = 1, ++ Blktrace_running = 2, ++ Blktrace_stopped = 3, ++}; ++ ++struct blk_user_trace_setup { ++ char name[32]; ++ __u16 act_mask; ++ __u32 buf_size; ++ __u32 buf_nr; ++ __u64 start_lba; ++ __u64 end_lba; ++ __u32 pid; ++}; ++ ++struct sbitmap_word { ++ long unsigned int word; ++ long unsigned int depth; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct sbq_wait_state { ++ atomic_t wait_cnt; ++ wait_queue_head_t wait; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct sbitmap_queue { ++ struct sbitmap sb; ++ unsigned int *alloc_hint; ++ unsigned int wake_batch; ++ atomic_t wake_index; ++ struct sbq_wait_state *ws; ++ bool round_robin; ++ unsigned int min_shallow_depth; ++}; ++ ++struct blk_mq_tags { ++ unsigned int nr_tags; ++ unsigned int nr_reserved_tags; ++ atomic_t active_queues; ++ struct sbitmap_queue bitmap_tags; ++ struct sbitmap_queue breserved_tags; ++ struct request **rqs; ++ struct request **static_rqs; ++ struct list_head page_list; ++}; ++ ++struct blk_mq_queue_data { ++ struct request *rq; ++ bool last; ++ long unsigned int kabi_reserved1; ++}; ++ ++struct blk_mq_ctxs { ++ struct kobject kobj; ++ struct blk_mq_ctx *queue_ctx; ++}; ++ ++typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); ++ ++struct ftrace_event_field { ++ struct list_head link; ++ const char *name; ++ const char *type; ++ int filter_type; ++ int offset; ++ int size; ++ int is_signed; ++}; ++ ++enum { ++ FORMAT_HEADER = 1, ++ FORMAT_FIELD_SEPERATOR = 2, ++ FORMAT_PRINTFMT = 3, ++}; ++ ++struct event_probe_data { ++ struct trace_event_file *file; ++ long unsigned int count; ++ int ref; ++ bool enable; ++}; ++ ++struct mmiotrace_rw { ++ resource_size_t phys; ++ long unsigned int value; ++ long unsigned int pc; ++ int map_id; ++ unsigned char opcode; ++ unsigned char width; ++}; ++ ++struct mmiotrace_map { ++ resource_size_t phys; ++ long unsigned int virt; ++ long unsigned int len; ++ int map_id; ++ unsigned char opcode; ++}; ++ ++struct trace_mmiotrace_rw { ++ struct trace_entry ent; ++ struct mmiotrace_rw rw; ++}; ++ ++struct trace_mmiotrace_map { ++ struct trace_entry ent; ++ struct mmiotrace_map map; ++}; ++ ++struct trace_branch { ++ struct trace_entry ent; ++ unsigned int line; ++ char func[31]; ++ char file[21]; ++ char correct; ++ 
char constant; ++}; ++ ++struct syscall_trace_enter { ++ struct trace_entry ent; ++ int nr; ++ long unsigned int args[0]; ++}; ++ ++struct syscall_trace_exit { ++ struct trace_entry ent; ++ int nr; ++ long int ret; ++}; ++ ++struct syscall_tp_t { ++ long long unsigned int regs; ++ long unsigned int syscall_nr; ++ long unsigned int ret; ++}; ++ ++struct syscall_tp_t___2 { ++ long long unsigned int regs; ++ long unsigned int syscall_nr; ++ long unsigned int args[6]; ++}; ++ ++enum perf_event_sample_format { ++ PERF_SAMPLE_IP = 1, ++ PERF_SAMPLE_TID = 2, ++ PERF_SAMPLE_TIME = 4, ++ PERF_SAMPLE_ADDR = 8, ++ PERF_SAMPLE_READ = 16, ++ PERF_SAMPLE_CALLCHAIN = 32, ++ PERF_SAMPLE_ID = 64, ++ PERF_SAMPLE_CPU = 128, ++ PERF_SAMPLE_PERIOD = 256, ++ PERF_SAMPLE_STREAM_ID = 512, ++ PERF_SAMPLE_RAW = 1024, ++ PERF_SAMPLE_BRANCH_STACK = 2048, ++ PERF_SAMPLE_REGS_USER = 4096, ++ PERF_SAMPLE_STACK_USER = 8192, ++ PERF_SAMPLE_WEIGHT = 16384, ++ PERF_SAMPLE_DATA_SRC = 32768, ++ PERF_SAMPLE_IDENTIFIER = 65536, ++ PERF_SAMPLE_TRANSACTION = 131072, ++ PERF_SAMPLE_REGS_INTR = 262144, ++ PERF_SAMPLE_PHYS_ADDR = 524288, ++ PERF_SAMPLE_MAX = 1048576, ++ __PERF_SAMPLE_CALLCHAIN_EARLY = 0, ++}; ++ ++enum { ++ FETCH_MTD_reg = 0, ++ FETCH_MTD_stack = 1, ++ FETCH_MTD_retval = 2, ++ FETCH_MTD_comm = 3, ++ FETCH_MTD_memory = 4, ++ FETCH_MTD_symbol = 5, ++ FETCH_MTD_deref = 6, ++ FETCH_MTD_bitfield = 7, ++ FETCH_MTD_file_offset = 8, ++ FETCH_MTD_END = 9, ++}; ++ ++typedef long unsigned int perf_trace_t[256]; ++ ++struct filter_pred; ++ ++struct prog_entry { ++ int target; ++ int when_to_branch; ++ struct filter_pred *pred; ++}; ++ ++typedef int (*filter_pred_fn_t)(struct filter_pred *, void *); ++ ++struct regex; ++ ++typedef int (*regex_match_func)(char *, struct regex *, int); ++ ++struct regex { ++ char pattern[256]; ++ int len; ++ int field_len; ++ regex_match_func match; ++}; ++ ++struct filter_pred { ++ filter_pred_fn_t fn; ++ u64 val; ++ struct regex regex; ++ short unsigned int *ops; ++ struct ftrace_event_field *field; ++ int offset; ++ int not; ++ int op; ++}; ++ ++enum filter_op_ids { ++ OP_GLOB = 0, ++ OP_NE = 1, ++ OP_EQ = 2, ++ OP_LE = 3, ++ OP_LT = 4, ++ OP_GE = 5, ++ OP_GT = 6, ++ OP_BAND = 7, ++ OP_MAX = 8, ++}; ++ ++enum { ++ FILT_ERR_NONE = 0, ++ FILT_ERR_INVALID_OP = 1, ++ FILT_ERR_TOO_MANY_OPEN = 2, ++ FILT_ERR_TOO_MANY_CLOSE = 3, ++ FILT_ERR_MISSING_QUOTE = 4, ++ FILT_ERR_OPERAND_TOO_LONG = 5, ++ FILT_ERR_EXPECT_STRING = 6, ++ FILT_ERR_EXPECT_DIGIT = 7, ++ FILT_ERR_ILLEGAL_FIELD_OP = 8, ++ FILT_ERR_FIELD_NOT_FOUND = 9, ++ FILT_ERR_ILLEGAL_INTVAL = 10, ++ FILT_ERR_BAD_SUBSYS_FILTER = 11, ++ FILT_ERR_TOO_MANY_PREDS = 12, ++ FILT_ERR_INVALID_FILTER = 13, ++ FILT_ERR_IP_FIELD_ONLY = 14, ++ FILT_ERR_INVALID_VALUE = 15, ++ FILT_ERR_NO_FILTER = 16, ++}; ++ ++struct filter_parse_error { ++ int lasterr; ++ int lasterr_pos; ++}; ++ ++typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); ++ ++enum { ++ INVERT = 1, ++ PROCESS_AND = 2, ++ PROCESS_OR = 4, ++}; ++ ++enum { ++ TOO_MANY_CLOSE = 4294967295, ++ TOO_MANY_OPEN = 4294967294, ++ MISSING_QUOTE = 4294967293, ++}; ++ ++struct filter_list { ++ struct list_head list; ++ struct event_filter *filter; ++}; ++ ++struct function_filter_data { ++ struct ftrace_ops *ops; ++ int first_filter; ++ int first_notrace; ++}; ++ ++struct event_trigger_ops; ++ ++struct event_command; ++ ++struct event_trigger_data { ++ long unsigned int count; ++ int ref; ++ struct event_trigger_ops *ops; ++ struct event_command *cmd_ops; ++ 
struct event_filter *filter; ++ char *filter_str; ++ void *private_data; ++ bool paused; ++ bool paused_tmp; ++ struct list_head list; ++ char *name; ++ struct list_head named_list; ++ struct event_trigger_data *named_data; ++}; ++ ++struct event_trigger_ops { ++ void (*func)(struct event_trigger_data *, void *, struct ring_buffer_event *); ++ int (*init)(struct event_trigger_ops *, struct event_trigger_data *); ++ void (*free)(struct event_trigger_ops *, struct event_trigger_data *); ++ int (*print)(struct seq_file *, struct event_trigger_ops *, struct event_trigger_data *); ++}; ++ ++struct event_command { ++ struct list_head list; ++ char *name; ++ enum event_trigger_type trigger_type; ++ int flags; ++ int (*func)(struct event_command *, struct trace_event_file *, char *, char *, char *); ++ int (*reg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); ++ void (*unreg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); ++ void (*unreg_all)(struct trace_event_file *); ++ int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); ++ struct event_trigger_ops * (*get_trigger_ops)(char *, char *); ++}; ++ ++struct enable_trigger_data { ++ struct trace_event_file *file; ++ bool enable; ++ bool hist; ++}; ++ ++enum event_command_flags { ++ EVENT_CMD_FL_POST_TRIGGER = 1, ++ EVENT_CMD_FL_NEEDS_REC = 2, ++}; ++ ++struct hist_field; ++ ++typedef u64 (*hist_field_fn_t)(struct hist_field *, struct tracing_map_elt *, struct ring_buffer_event *, void *); ++ ++struct hist_trigger_data; ++ ++struct hist_var { ++ char *name; ++ struct hist_trigger_data *hist_data; ++ unsigned int idx; ++}; ++ ++enum field_op_id { ++ FIELD_OP_NONE = 0, ++ FIELD_OP_PLUS = 1, ++ FIELD_OP_MINUS = 2, ++ FIELD_OP_UNARY_MINUS = 3, ++}; ++ ++struct hist_field { ++ struct ftrace_event_field *field; ++ long unsigned int flags; ++ hist_field_fn_t fn; ++ unsigned int size; ++ unsigned int offset; ++ unsigned int is_signed; ++ const char *type; ++ struct hist_field *operands[2]; ++ struct hist_trigger_data *hist_data; ++ struct hist_var var; ++ enum field_op_id operator; ++ char *system; ++ char *event_name; ++ char *name; ++ unsigned int var_idx; ++ unsigned int var_ref_idx; ++ bool read_once; ++}; ++ ++struct hist_trigger_attrs; ++ ++struct action_data; ++ ++struct field_var; ++ ++struct field_var_hist; ++ ++struct hist_trigger_data { ++ struct hist_field *fields[22]; ++ unsigned int n_vals; ++ unsigned int n_keys; ++ unsigned int n_fields; ++ unsigned int n_vars; ++ unsigned int key_size; ++ struct tracing_map_sort_key sort_keys[2]; ++ unsigned int n_sort_keys; ++ struct trace_event_file *event_file; ++ struct hist_trigger_attrs *attrs; ++ struct tracing_map *map; ++ bool enable_timestamps; ++ bool remove; ++ struct hist_field *var_refs[16]; ++ unsigned int n_var_refs; ++ struct action_data *actions[8]; ++ unsigned int n_actions; ++ struct hist_field *synth_var_refs[16]; ++ unsigned int n_synth_var_refs; ++ struct field_var *field_vars[16]; ++ unsigned int n_field_vars; ++ unsigned int n_field_var_str; ++ struct field_var_hist *field_var_hists[16]; ++ unsigned int n_field_var_hists; ++ struct field_var *max_vars[16]; ++ unsigned int n_max_vars; ++ unsigned int n_max_var_str; ++}; ++ ++enum hist_field_flags { ++ HIST_FIELD_FL_HITCOUNT = 1, ++ HIST_FIELD_FL_KEY = 2, ++ HIST_FIELD_FL_STRING = 4, ++ HIST_FIELD_FL_HEX = 8, ++ HIST_FIELD_FL_SYM = 16, ++ HIST_FIELD_FL_SYM_OFFSET = 32, ++ HIST_FIELD_FL_EXECNAME = 64, ++ 
HIST_FIELD_FL_SYSCALL = 128, ++ HIST_FIELD_FL_STACKTRACE = 256, ++ HIST_FIELD_FL_LOG2 = 512, ++ HIST_FIELD_FL_TIMESTAMP = 1024, ++ HIST_FIELD_FL_TIMESTAMP_USECS = 2048, ++ HIST_FIELD_FL_VAR = 4096, ++ HIST_FIELD_FL_EXPR = 8192, ++ HIST_FIELD_FL_VAR_REF = 16384, ++ HIST_FIELD_FL_CPU = 32768, ++ HIST_FIELD_FL_ALIAS = 65536, ++}; ++ ++struct var_defs { ++ unsigned int n_vars; ++ char *name[16]; ++ char *expr[16]; ++}; ++ ++struct hist_trigger_attrs { ++ char *keys_str; ++ char *vals_str; ++ char *sort_key_str; ++ char *name; ++ char *clock; ++ bool pause; ++ bool cont; ++ bool clear; ++ bool ts_in_usecs; ++ unsigned int map_bits; ++ char *assignment_str[16]; ++ unsigned int n_assignments; ++ char *action_str[8]; ++ unsigned int n_actions; ++ struct var_defs var_defs; ++}; ++ ++struct field_var { ++ struct hist_field *var; ++ struct hist_field *val; ++}; ++ ++struct field_var_hist { ++ struct hist_trigger_data *hist_data; ++ char *cmd; ++}; ++ ++typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, void *, struct ring_buffer_event *, struct action_data *, u64 *); ++ ++struct synth_event; ++ ++struct action_data { ++ action_fn_t fn; ++ unsigned int n_params; ++ char *params[16]; ++ union { ++ struct { ++ unsigned int var_ref_idx; ++ char *match_event; ++ char *match_event_system; ++ char *synth_event_name; ++ struct synth_event *synth_event; ++ } onmatch; ++ struct { ++ char *var_str; ++ char *fn_name; ++ unsigned int max_var_ref_idx; ++ struct hist_field *max_var; ++ struct hist_field *var; ++ } onmax; ++ }; ++}; ++ ++struct synth_field { ++ char *type; ++ char *name; ++ size_t size; ++ bool is_signed; ++ bool is_string; ++}; ++ ++struct synth_event { ++ struct list_head list; ++ int ref; ++ char *name; ++ struct synth_field **fields; ++ unsigned int n_fields; ++ unsigned int n_u64; ++ struct trace_event_class class; ++ struct trace_event_call call; ++ struct tracepoint *tp; ++}; ++ ++struct synth_trace_event { ++ struct trace_entry ent; ++ u64 fields[0]; ++}; ++ ++typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int); ++ ++struct hist_var_data { ++ struct list_head list; ++ struct hist_trigger_data *hist_data; ++}; ++ ++struct hist_elt_data { ++ char *comm; ++ u64 *var_ref_vals; ++ char *field_var_str[16]; ++}; ++ ++struct bpf_perf_event_value { ++ __u64 counter; ++ __u64 enabled; ++ __u64 running; ++}; ++ ++struct bpf_raw_tracepoint_args { ++ __u64 args[0]; ++}; ++ ++enum bpf_task_fd_type { ++ BPF_FD_TYPE_RAW_TRACEPOINT = 0, ++ BPF_FD_TYPE_TRACEPOINT = 1, ++ BPF_FD_TYPE_KPROBE = 2, ++ BPF_FD_TYPE_KRETPROBE = 3, ++ BPF_FD_TYPE_UPROBE = 4, ++ BPF_FD_TYPE_URETPROBE = 5, ++}; ++ ++struct bpf_array { ++ struct bpf_map map; ++ u32 elem_size; ++ u32 index_mask; ++ enum bpf_prog_type owner_prog_type; ++ bool owner_jited; ++ union { ++ char value[0]; ++ void *ptrs[0]; ++ void *pptrs[0]; ++ }; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct bpf_event_entry { ++ struct perf_event *event; ++ struct file *perf_file; ++ struct file *map_file; ++ struct callback_head rcu; ++}; ++ ++typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); ++ ++typedef struct user_pt_regs bpf_user_pt_regs_t; ++ ++struct bpf_perf_event_data { ++ bpf_user_pt_regs_t regs; ++ __u64 sample_period; ++ __u64 addr; ++}; ++ ++struct perf_event_query_bpf { ++ __u32 ids_len; ++ __u32 prog_cnt; ++ __u32 ids[0]; ++}; ++ ++struct bpf_perf_event_data_kern { ++ bpf_user_pt_regs_t *regs; ++ struct 
perf_sample_data *data; ++ struct perf_event *event; ++}; ++ ++struct bpf_trace_module { ++ struct module *module; ++ struct list_head list; ++}; ++ ++struct bpf_trace_sample_data { ++ struct perf_sample_data sds[3]; ++}; ++ ++struct bpf_raw_tp_regs { ++ struct pt_regs regs[3]; ++}; ++ ++struct kprobe_trace_entry_head { ++ struct trace_entry ent; ++ long unsigned int ip; ++}; ++ ++struct kretprobe_trace_entry_head { ++ struct trace_entry ent; ++ long unsigned int func; ++ long unsigned int ret_ip; ++}; ++ ++typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); ++ ++typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *); ++ ++struct fetch_type { ++ const char *name; ++ size_t size; ++ int is_signed; ++ print_type_func_t print; ++ const char *fmt; ++ const char *fmttype; ++ fetch_func_t fetch[9]; ++}; ++ ++struct fetch_param { ++ fetch_func_t fn; ++ void *data; ++}; ++ ++typedef u32 string; ++ ++typedef u32 string_size; ++ ++struct probe_arg { ++ struct fetch_param fetch; ++ struct fetch_param fetch_size; ++ unsigned int offset; ++ const char *name; ++ const char *comm; ++ const struct fetch_type *type; ++}; ++ ++struct trace_probe { ++ unsigned int flags; ++ struct trace_event_class class; ++ struct trace_event_call call; ++ struct list_head files; ++ ssize_t size; ++ unsigned int nr_args; ++ struct probe_arg args[0]; ++}; ++ ++struct event_file_link { ++ struct trace_event_file *file; ++ struct list_head list; ++}; ++ ++struct trace_kprobe { ++ struct list_head list; ++ struct kretprobe rp; ++ long unsigned int *nhit; ++ const char *symbol; ++ struct trace_probe tp; ++}; ++ ++struct symbol_cache { ++ char *symbol; ++ long int offset; ++ long unsigned int addr; ++}; ++ ++struct trace_event_raw_cpu { ++ struct trace_entry ent; ++ u32 state; ++ u32 cpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_powernv_throttle { ++ struct trace_entry ent; ++ int chip_id; ++ u32 __data_loc_reason; ++ int pmax; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_pstate_sample { ++ struct trace_entry ent; ++ u32 core_busy; ++ u32 scaled_busy; ++ u32 from; ++ u32 to; ++ u64 mperf; ++ u64 aperf; ++ u64 tsc; ++ u32 freq; ++ u32 io_boost; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cpu_frequency_limits { ++ struct trace_entry ent; ++ u32 min_freq; ++ u32 max_freq; ++ u32 cpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_device_pm_callback_start { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ u32 __data_loc_driver; ++ u32 __data_loc_parent; ++ u32 __data_loc_pm_ops; ++ int event; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_device_pm_callback_end { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ u32 __data_loc_driver; ++ int error; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_suspend_resume { ++ struct trace_entry ent; ++ const char *action; ++ int val; ++ bool start; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wakeup_source { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u64 state; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_clock { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u64 state; ++ u64 cpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_power_domain { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u64 state; ++ u64 cpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_pm_qos_request { ++ struct trace_entry ent; ++ int pm_qos_class; ++ s32 value; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_pm_qos_update_request_timeout { ++ struct trace_entry ent; 
++ int pm_qos_class; ++ s32 value; ++ long unsigned int timeout_us; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_pm_qos_update { ++ struct trace_entry ent; ++ enum pm_qos_req_action action; ++ int prev_value; ++ int curr_value; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dev_pm_qos_request { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ enum dev_pm_qos_req_type type; ++ s32 new_value; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_cpu {}; ++ ++struct trace_event_data_offsets_powernv_throttle { ++ u32 reason; ++}; ++ ++struct trace_event_data_offsets_pstate_sample {}; ++ ++struct trace_event_data_offsets_cpu_frequency_limits {}; ++ ++struct trace_event_data_offsets_device_pm_callback_start { ++ u32 device; ++ u32 driver; ++ u32 parent; ++ u32 pm_ops; ++}; ++ ++struct trace_event_data_offsets_device_pm_callback_end { ++ u32 device; ++ u32 driver; ++}; ++ ++struct trace_event_data_offsets_suspend_resume {}; ++ ++struct trace_event_data_offsets_wakeup_source { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_clock { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_power_domain { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_pm_qos_request {}; ++ ++struct trace_event_data_offsets_pm_qos_update_request_timeout {}; ++ ++struct trace_event_data_offsets_pm_qos_update {}; ++ ++struct trace_event_data_offsets_dev_pm_qos_request { ++ u32 name; ++}; ++ ++struct trace_event_raw_rpm_internal { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ int flags; ++ int usage_count; ++ int disable_depth; ++ int runtime_auto; ++ int request_pending; ++ int irq_safe; ++ int child_count; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rpm_return_int { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ long unsigned int ip; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_rpm_internal { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_rpm_return_int { ++ u32 name; ++}; ++ ++struct deref_fetch_param { ++ struct fetch_param orig; ++ long int offset; ++ fetch_func_t fetch; ++ fetch_func_t fetch_size; ++}; ++ ++struct bitfield_fetch_param { ++ struct fetch_param orig; ++ unsigned char hi_shift; ++ unsigned char low_shift; ++}; ++ ++enum uprobe_filter_ctx { ++ UPROBE_FILTER_REGISTER = 0, ++ UPROBE_FILTER_UNREGISTER = 1, ++ UPROBE_FILTER_MMAP = 2, ++}; ++ ++struct uprobe_consumer { ++ int (*handler)(struct uprobe_consumer *, struct pt_regs *); ++ int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *); ++ bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); ++ struct uprobe_consumer *next; ++}; ++ ++struct uprobe_trace_entry_head { ++ struct trace_entry ent; ++ long unsigned int vaddr[0]; ++}; ++ ++struct trace_uprobe_filter { ++ rwlock_t rwlock; ++ int nr_systemwide; ++ struct list_head perf_events; ++}; ++ ++struct trace_uprobe { ++ struct list_head list; ++ struct trace_uprobe_filter filter; ++ struct uprobe_consumer consumer; ++ struct path path; ++ struct inode *inode; ++ char *filename; ++ long unsigned int offset; ++ long unsigned int nhit; ++ struct trace_probe tp; ++}; ++ ++struct uprobe_dispatch_data { ++ struct trace_uprobe *tu; ++ long unsigned int bp_addr; ++}; ++ ++struct uprobe_cpu_buffer { ++ struct mutex mutex; ++ void *buf; ++}; ++ ++typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); ++ ++enum xdp_action { ++ XDP_ABORTED = 0, ++ XDP_DROP = 1, ++ XDP_PASS = 2, ++ XDP_TX = 3, ++ XDP_REDIRECT = 4, ++}; ++ 
++typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); ++ ++struct bpf_prog_dummy { ++ struct bpf_prog prog; ++}; ++ ++struct trace_event_raw_xdp_exception { ++ struct trace_entry ent; ++ int prog_id; ++ u32 act; ++ int ifindex; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xdp_redirect_template { ++ struct trace_entry ent; ++ int prog_id; ++ u32 act; ++ int ifindex; ++ int err; ++ int to_ifindex; ++ u32 map_id; ++ int map_index; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xdp_cpumap_kthread { ++ struct trace_entry ent; ++ int map_id; ++ u32 act; ++ int cpu; ++ unsigned int drops; ++ unsigned int processed; ++ int sched; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xdp_cpumap_enqueue { ++ struct trace_entry ent; ++ int map_id; ++ u32 act; ++ int cpu; ++ unsigned int drops; ++ unsigned int processed; ++ int to_cpu; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xdp_devmap_xmit { ++ struct trace_entry ent; ++ int map_id; ++ u32 act; ++ u32 map_index; ++ int drops; ++ int sent; ++ int from_ifindex; ++ int to_ifindex; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_xdp_exception {}; ++ ++struct trace_event_data_offsets_xdp_redirect_template {}; ++ ++struct trace_event_data_offsets_xdp_cpumap_kthread {}; ++ ++struct trace_event_data_offsets_xdp_cpumap_enqueue {}; ++ ++struct trace_event_data_offsets_xdp_devmap_xmit {}; ++ ++enum bpf_cmd { ++ BPF_MAP_CREATE = 0, ++ BPF_MAP_LOOKUP_ELEM = 1, ++ BPF_MAP_UPDATE_ELEM = 2, ++ BPF_MAP_DELETE_ELEM = 3, ++ BPF_MAP_GET_NEXT_KEY = 4, ++ BPF_PROG_LOAD = 5, ++ BPF_OBJ_PIN = 6, ++ BPF_OBJ_GET = 7, ++ BPF_PROG_ATTACH = 8, ++ BPF_PROG_DETACH = 9, ++ BPF_PROG_TEST_RUN = 10, ++ BPF_PROG_GET_NEXT_ID = 11, ++ BPF_MAP_GET_NEXT_ID = 12, ++ BPF_PROG_GET_FD_BY_ID = 13, ++ BPF_MAP_GET_FD_BY_ID = 14, ++ BPF_OBJ_GET_INFO_BY_FD = 15, ++ BPF_PROG_QUERY = 16, ++ BPF_RAW_TRACEPOINT_OPEN = 17, ++ BPF_BTF_LOAD = 18, ++ BPF_BTF_GET_FD_BY_ID = 19, ++ BPF_TASK_FD_QUERY = 20, ++}; ++ ++struct bpf_prog_info { ++ __u32 type; ++ __u32 id; ++ __u8 tag[8]; ++ __u32 jited_prog_len; ++ __u32 xlated_prog_len; ++ __u64 jited_prog_insns; ++ __u64 xlated_prog_insns; ++ __u64 load_time; ++ __u32 created_by_uid; ++ __u32 nr_map_ids; ++ __u64 map_ids; ++ char name[16]; ++ __u32 ifindex; ++ __u32 gpl_compatible: 1; ++ __u64 netns_dev; ++ __u64 netns_ino; ++ __u32 nr_jited_ksyms; ++ __u32 nr_jited_func_lens; ++ __u64 jited_ksyms; ++ __u64 jited_func_lens; ++}; ++ ++struct bpf_map_info { ++ __u32 type; ++ __u32 id; ++ __u32 key_size; ++ __u32 value_size; ++ __u32 max_entries; ++ __u32 map_flags; ++ char name[16]; ++ __u32 ifindex; ++ __u64 netns_dev; ++ __u64 netns_ino; ++ __u32 btf_id; ++ __u32 btf_key_type_id; ++ __u32 btf_value_type_id; ++}; ++ ++struct bpf_btf_info { ++ __u64 btf; ++ __u32 btf_size; ++ __u32 id; ++}; ++ ++struct btf_header { ++ __u16 magic; ++ __u8 version; ++ __u8 flags; ++ __u32 hdr_len; ++ __u32 type_off; ++ __u32 type_len; ++ __u32 str_off; ++ __u32 str_len; ++}; ++ ++struct btf { ++ void *data; ++ struct btf_type **types; ++ u32 *resolved_ids; ++ u32 *resolved_sizes; ++ const char *strings; ++ void *nohdr_data; ++ struct btf_header hdr; ++ u32 nr_types; ++ u32 types_size; ++ u32 data_size; ++ refcount_t refcnt; ++ u32 id; ++ struct callback_head rcu; ++}; ++ ++struct bpf_raw_tracepoint { ++ struct bpf_raw_event_map *btp; ++ struct bpf_prog *prog; ++}; ++ ++struct bpf_verifier_log { ++ u32 level; ++ char kbuf[1024]; ++ char *ubuf; ++ u32 len_used; ++ u32 len_total; ++}; ++ ++struct bpf_subprog_info { ++ u32 start; ++ 
u16 stack_depth; ++}; ++ ++struct bpf_verifier_stack_elem; ++ ++struct bpf_verifier_state; ++ ++struct bpf_verifier_state_list; ++ ++struct bpf_insn_aux_data; ++ ++struct bpf_verifier_env { ++ u32 insn_idx; ++ u32 prev_insn_idx; ++ struct bpf_prog *prog; ++ const struct bpf_verifier_ops *ops; ++ struct bpf_verifier_stack_elem *head; ++ int stack_size; ++ bool strict_alignment; ++ struct bpf_verifier_state *cur_state; ++ struct bpf_verifier_state_list **explored_states; ++ struct bpf_map *used_maps[64]; ++ u32 used_map_cnt; ++ u32 id_gen; ++ bool allow_ptr_leaks; ++ bool seen_direct_write; ++ struct bpf_insn_aux_data *insn_aux_data; ++ struct bpf_verifier_log log; ++ struct bpf_subprog_info subprog_info[257]; ++ u32 subprog_cnt; ++}; ++ ++struct tnum { ++ u64 value; ++ u64 mask; ++}; ++ ++enum bpf_reg_liveness { ++ REG_LIVE_NONE = 0, ++ REG_LIVE_READ = 1, ++ REG_LIVE_WRITTEN = 2, ++}; ++ ++struct bpf_reg_state { ++ enum bpf_reg_type type; ++ union { ++ u16 range; ++ struct bpf_map *map_ptr; ++ long unsigned int raw; ++ }; ++ s32 off; ++ u32 id; ++ struct tnum var_off; ++ s64 smin_value; ++ s64 smax_value; ++ u64 umin_value; ++ u64 umax_value; ++ u32 frameno; ++ enum bpf_reg_liveness live; ++}; ++ ++enum bpf_stack_slot_type { ++ STACK_INVALID = 0, ++ STACK_SPILL = 1, ++ STACK_MISC = 2, ++ STACK_ZERO = 3, ++}; ++ ++struct bpf_stack_state { ++ struct bpf_reg_state spilled_ptr; ++ u8 slot_type[8]; ++}; ++ ++struct bpf_func_state { ++ struct bpf_reg_state regs[11]; ++ struct bpf_verifier_state *parent; ++ int callsite; ++ u32 frameno; ++ u32 subprogno; ++ int allocated_stack; ++ struct bpf_stack_state *stack; ++}; ++ ++struct bpf_verifier_state { ++ struct bpf_func_state *frame[8]; ++ struct bpf_verifier_state *parent; ++ u32 curframe; ++ bool speculative; ++}; ++ ++struct bpf_verifier_state_list { ++ struct bpf_verifier_state state; ++ struct bpf_verifier_state_list *next; ++}; ++ ++struct bpf_insn_aux_data { ++ union { ++ enum bpf_reg_type ptr_type; ++ long unsigned int map_state; ++ s32 call_imm; ++ u32 alu_limit; ++ }; ++ int ctx_field_size; ++ int sanitize_stack_off; ++ bool seen; ++ u8 alu_state; ++}; ++ ++struct bpf_verifier_stack_elem { ++ struct bpf_verifier_state st; ++ int insn_idx; ++ int prev_insn_idx; ++ struct bpf_verifier_stack_elem *next; ++}; ++ ++typedef void (*bpf_insn_print_t)(void *, const char *, ...); ++ ++typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); ++ ++typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); ++ ++struct bpf_insn_cbs { ++ bpf_insn_print_t cb_print; ++ bpf_insn_revmap_call_t cb_call; ++ bpf_insn_print_imm_t cb_imm; ++ void *private_data; ++}; ++ ++struct bpf_call_arg_meta { ++ struct bpf_map *map_ptr; ++ bool raw_mode; ++ bool pkt_access; ++ int regno; ++ int access_size; ++ u64 msize_max_value; ++}; ++ ++enum reg_arg_type { ++ SRC_OP = 0, ++ DST_OP = 1, ++ DST_OP_NO_MARK = 2, ++}; ++ ++enum { ++ DISCOVERED = 16, ++ EXPLORED = 32, ++ FALLTHROUGH = 1, ++ BRANCH = 2, ++}; ++ ++struct idpair { ++ u32 old; ++ u32 cur; ++}; ++ ++struct tree_descr { ++ const char *name; ++ const struct file_operations *ops; ++ int mode; ++}; ++ ++struct match_token { ++ int token; ++ const char *pattern; ++}; ++ ++enum { ++ MAX_OPT_ARGS = 3, ++}; ++ ++enum bpf_type { ++ BPF_TYPE_UNSPEC = 0, ++ BPF_TYPE_PROG = 1, ++ BPF_TYPE_MAP = 2, ++}; ++ ++struct map_iter { ++ void *key; ++ bool done; ++}; ++ ++enum { ++ OPT_MODE = 0, ++ OPT_ERR = 1, ++}; ++ ++struct bpf_mount_opts { ++ umode_t mode; ++}; ++ ++struct 
pcpu_freelist_node; ++ ++struct pcpu_freelist_head { ++ struct pcpu_freelist_node *first; ++ raw_spinlock_t lock; ++}; ++ ++struct pcpu_freelist_node { ++ struct pcpu_freelist_node *next; ++}; ++ ++struct pcpu_freelist { ++ struct pcpu_freelist_head *freelist; ++}; ++ ++struct bpf_lru_node { ++ struct list_head list; ++ u16 cpu; ++ u8 type; ++ u8 ref; ++}; ++ ++struct bpf_lru_list { ++ struct list_head lists[3]; ++ unsigned int counts[2]; ++ struct list_head *next_inactive_rotation; ++ raw_spinlock_t lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct bpf_lru_locallist { ++ struct list_head lists[2]; ++ u16 next_steal; ++ raw_spinlock_t lock; ++}; ++ ++struct bpf_common_lru { ++ struct bpf_lru_list lru_list; ++ struct bpf_lru_locallist *local_list; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); ++ ++struct bpf_lru { ++ union { ++ struct bpf_common_lru common_lru; ++ struct bpf_lru_list *percpu_lru; ++ }; ++ del_from_htab_func del_from_htab; ++ void *del_arg; ++ unsigned int hash_offset; ++ unsigned int nr_scans; ++ bool percpu; ++ long: 56; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct bucket { ++ struct hlist_nulls_head head; ++ raw_spinlock_t lock; ++}; ++ ++struct htab_elem; ++ ++struct bpf_htab { ++ struct bpf_map map; ++ struct bucket *buckets; ++ void *elems; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ union { ++ struct pcpu_freelist freelist; ++ struct bpf_lru lru; ++ }; ++ struct htab_elem **extra_elems; ++ atomic_t count; ++ u32 n_buckets; ++ u32 elem_size; ++ u32 hashrnd; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct htab_elem { ++ union { ++ struct hlist_nulls_node hash_node; ++ struct { ++ void *padding; ++ union { ++ struct bpf_htab *htab; ++ struct pcpu_freelist_node fnode; ++ }; ++ }; ++ }; ++ union { ++ struct callback_head rcu; ++ struct bpf_lru_node lru_node; ++ }; ++ u32 hash; ++ int: 32; ++ char key[0]; ++}; ++ ++enum bpf_lru_list_type { ++ BPF_LRU_LIST_T_ACTIVE = 0, ++ BPF_LRU_LIST_T_INACTIVE = 1, ++ BPF_LRU_LIST_T_FREE = 2, ++ BPF_LRU_LOCAL_LIST_T_FREE = 3, ++ BPF_LRU_LOCAL_LIST_T_PENDING = 4, ++}; ++ ++struct bpf_lpm_trie_key { ++ __u32 prefixlen; ++ __u8 data[0]; ++}; ++ ++struct lpm_trie_node { ++ struct callback_head rcu; ++ struct lpm_trie_node *child[2]; ++ u32 prefixlen; ++ u32 flags; ++ u8 data[0]; ++}; ++ ++struct lpm_trie { ++ struct bpf_map map; ++ struct lpm_trie_node *root; ++ size_t n_entries; ++ size_t max_prefixlen; ++ size_t data_size; ++ raw_spinlock_t lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct bpf_cgroup_storage_map { ++ struct bpf_map map; ++ spinlock_t lock; ++ struct bpf_prog *prog; ++ struct rb_root root; ++ struct list_head list; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct btf_enum { ++ __u32 name_off; ++ __s32 val; ++}; ++ ++struct btf_array { ++ __u32 type; ++ __u32 index_type; ++ __u32 nelems; ++}; ++ ++struct btf_member { ++ __u32 name_off; ++ __u32 type; ++ __u32 offset; ++}; ++ ++enum verifier_phase { ++ CHECK_META = 0, ++ CHECK_TYPE = 1, ++}; ++ ++struct resolve_vertex { ++ const struct btf_type *t; ++ u32 type_id; ++ u16 next_member; ++}; ++ ++enum visit_state { ++ NOT_VISITED = 0, ++ VISITED = 1, ++ RESOLVED = 2, ++}; ++ ++enum resolve_mode { ++ RESOLVE_TBD = 0, ++ RESOLVE_PTR = 1, ++ 
RESOLVE_STRUCT_OR_ARRAY = 2, ++}; ++ ++struct btf_sec_info { ++ u32 off; ++ u32 len; ++}; ++ ++struct btf_verifier_env { ++ struct btf *btf; ++ u8 *visit_states; ++ struct resolve_vertex stack[32]; ++ struct bpf_verifier_log log; ++ u32 log_type_id; ++ u32 top_stack; ++ enum verifier_phase phase; ++ enum resolve_mode resolve_mode; ++}; ++ ++struct btf_kind_operations { ++ s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); ++ int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); ++ int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); ++ void (*log_details)(struct btf_verifier_env *, const struct btf_type *); ++ void (*seq_show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct seq_file *); ++}; ++ ++enum xdp_mem_type { ++ MEM_TYPE_PAGE_SHARED = 0, ++ MEM_TYPE_PAGE_ORDER0 = 1, ++ MEM_TYPE_PAGE_POOL = 2, ++ MEM_TYPE_ZERO_COPY = 3, ++ MEM_TYPE_MAX = 4, ++}; ++ ++struct xdp_buff { ++ void *data; ++ void *data_end; ++ void *data_meta; ++ void *data_hard_start; ++ long unsigned int handle; ++ struct xdp_rxq_info *rxq; ++}; ++ ++struct xdp_bulk_queue { ++ struct xdp_frame *q[16]; ++ struct net_device *dev_rx; ++ unsigned int count; ++}; ++ ++struct bpf_dtab; ++ ++struct bpf_dtab_netdev { ++ struct net_device *dev; ++ struct bpf_dtab *dtab; ++ unsigned int bit; ++ struct xdp_bulk_queue *bulkq; ++ struct callback_head rcu; ++}; ++ ++struct bpf_dtab { ++ struct bpf_map map; ++ struct bpf_dtab_netdev **netdev_map; ++ long unsigned int *flush_needed; ++ struct list_head list; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ptr_ring { ++ int producer; ++ spinlock_t producer_lock; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ int consumer_head; ++ int consumer_tail; ++ spinlock_t consumer_lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ int size; ++ int batch; ++ void **queue; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct xdp_bulk_queue___2 { ++ void *q[8]; ++ unsigned int count; ++}; ++ ++struct bpf_cpu_map_entry { ++ u32 cpu; ++ int map_id; ++ u32 qsize; ++ struct xdp_bulk_queue___2 *bulkq; ++ struct ptr_ring *queue; ++ struct task_struct *kthread; ++ struct work_struct kthread_stop_wq; ++ atomic_t refcnt; ++ struct callback_head rcu; ++}; ++ ++struct bpf_cpu_map { ++ struct bpf_map map; ++ struct bpf_cpu_map_entry **cpu_map; ++ long unsigned int *flush_needed; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct rhlist_head { ++ struct rhash_head rhead; ++ struct rhlist_head *next; ++}; ++ ++struct bucket_table { ++ unsigned int size; ++ unsigned int nest; ++ unsigned int rehash; ++ u32 hash_rnd; ++ unsigned int locks_mask; ++ spinlock_t *locks; ++ struct list_head walkers; ++ struct callback_head rcu; ++ struct bucket_table *future_tbl; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct rhash_head *buckets[0]; ++}; ++ ++struct bpf_offload_dev { ++ struct list_head netdevs; ++}; ++ ++struct bpf_offload_netdev { ++ struct rhash_head l; ++ struct net_device *netdev; ++ struct bpf_offload_dev *offdev; ++ struct list_head progs; ++ struct list_head maps; ++ struct list_head offdev_netdevs; ++}; ++ ++struct ns_get_path_bpf_prog_args { ++ struct bpf_prog *prog; ++ struct bpf_prog_info *info; ++}; ++ ++struct 
ns_get_path_bpf_map_args { ++ struct bpf_offloaded_map *offmap; ++ struct bpf_map_info *info; ++}; ++ ++enum bpf_stack_build_id_status { ++ BPF_STACK_BUILD_ID_EMPTY = 0, ++ BPF_STACK_BUILD_ID_VALID = 1, ++ BPF_STACK_BUILD_ID_IP = 2, ++}; ++ ++struct bpf_stack_build_id { ++ __s32 status; ++ unsigned char build_id[20]; ++ union { ++ __u64 offset; ++ __u64 ip; ++ }; ++}; ++ ++typedef struct elf32_hdr Elf32_Ehdr; ++ ++typedef struct elf32_phdr Elf32_Phdr; ++ ++struct elf64_phdr { ++ Elf64_Word p_type; ++ Elf64_Word p_flags; ++ Elf64_Off p_offset; ++ Elf64_Addr p_vaddr; ++ Elf64_Addr p_paddr; ++ Elf64_Xword p_filesz; ++ Elf64_Xword p_memsz; ++ Elf64_Xword p_align; ++}; ++ ++typedef struct elf64_phdr Elf64_Phdr; ++ ++typedef struct elf32_note Elf32_Nhdr; ++ ++struct stack_map_bucket { ++ struct pcpu_freelist_node fnode; ++ u32 hash; ++ u32 nr; ++ u64 data[0]; ++}; ++ ++struct bpf_stack_map { ++ struct bpf_map map; ++ void *elems; ++ struct pcpu_freelist freelist; ++ u32 n_buckets; ++ struct stack_map_bucket *buckets[0]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct stack_map_irq_work { ++ struct irq_work irq_work; ++ struct rw_semaphore *sem; ++}; ++ ++struct __sk_buff { ++ __u32 len; ++ __u32 pkt_type; ++ __u32 mark; ++ __u32 queue_mapping; ++ __u32 protocol; ++ __u32 vlan_present; ++ __u32 vlan_tci; ++ __u32 vlan_proto; ++ __u32 priority; ++ __u32 ingress_ifindex; ++ __u32 ifindex; ++ __u32 tc_index; ++ __u32 cb[5]; ++ __u32 hash; ++ __u32 tc_classid; ++ __u32 data; ++ __u32 data_end; ++ __u32 napi_id; ++ __u32 family; ++ __u32 remote_ip4; ++ __u32 local_ip4; ++ __u32 remote_ip6[4]; ++ __u32 local_ip6[4]; ++ __u32 remote_port; ++ __u32 local_port; ++ __u32 data_meta; ++}; ++ ++struct bpf_cgroup_dev_ctx { ++ __u32 access_type; ++ __u32 major; ++ __u32 minor; ++}; ++ ++struct bpf_prog_list { ++ struct list_head node; ++ struct bpf_prog *prog; ++ struct bpf_cgroup_storage *storage; ++}; ++ ++struct qdisc_skb_cb { ++ unsigned int pkt_len; ++ u16 slave_dev_queue_mapping; ++ u16 tc_classid; ++ unsigned char data[20]; ++}; ++ ++struct bpf_sock_addr_kern { ++ struct sock *sk; ++ struct sockaddr *uaddr; ++ u64 tmp_reg; ++ void *t_ctx; ++}; ++ ++struct bpf_sock_ops_kern { ++ struct sock *sk; ++ u32 op; ++ union { ++ u32 args[4]; ++ u32 reply; ++ u32 replylong[4]; ++ }; ++ u32 is_fullsock; ++ u64 temp; ++}; ++ ++enum sock_flags { ++ SOCK_DEAD = 0, ++ SOCK_DONE = 1, ++ SOCK_URGINLINE = 2, ++ SOCK_KEEPOPEN = 3, ++ SOCK_LINGER = 4, ++ SOCK_DESTROY = 5, ++ SOCK_BROADCAST = 6, ++ SOCK_TIMESTAMP = 7, ++ SOCK_ZAPPED = 8, ++ SOCK_USE_WRITE_QUEUE = 9, ++ SOCK_DBG = 10, ++ SOCK_RCVTSTAMP = 11, ++ SOCK_RCVTSTAMPNS = 12, ++ SOCK_LOCALROUTE = 13, ++ SOCK_QUEUE_SHRUNK = 14, ++ SOCK_MEMALLOC = 15, ++ SOCK_TIMESTAMPING_RX_SOFTWARE = 16, ++ SOCK_FASYNC = 17, ++ SOCK_RXQ_OVFL = 18, ++ SOCK_ZEROCOPY = 19, ++ SOCK_WIFI_STATUS = 20, ++ SOCK_NOFCS = 21, ++ SOCK_FILTER_LOCKED = 22, ++ SOCK_SELECT_ERR_QUEUE = 23, ++ SOCK_RCU_FREE = 24, ++ SOCK_TXTIME = 25, ++}; ++ ++struct reuseport_array { ++ struct bpf_map map; ++ struct sock *ptrs[0]; ++}; ++ ++struct module___2; ++ ++struct file___2; ++ ++struct kiocb___2; ++ ++struct iov_iter___2; ++ ++struct poll_table_struct___2; ++ ++struct vm_area_struct___2; ++ ++struct file_lock___2; ++ ++struct page___2; ++ ++struct pipe_inode_info___2; ++ ++struct file_operations___2 { ++ struct module___2 *owner; ++ loff_t (*llseek)(struct file___2 *, loff_t, int); ++ ssize_t (*read)(struct file___2 *, char *, size_t, loff_t *); ++ ssize_t (*write)(struct 
file___2 *, const char *, size_t, loff_t *); ++ ssize_t (*read_iter)(struct kiocb___2 *, struct iov_iter___2 *); ++ ssize_t (*write_iter)(struct kiocb___2 *, struct iov_iter___2 *); ++ int (*iterate)(struct file___2 *, struct dir_context *); ++ int (*iterate_shared)(struct file___2 *, struct dir_context *); ++ __poll_t (*poll)(struct file___2 *, struct poll_table_struct___2 *); ++ long int (*unlocked_ioctl)(struct file___2 *, unsigned int, long unsigned int); ++ long int (*compat_ioctl)(struct file___2 *, unsigned int, long unsigned int); ++ int (*mmap)(struct file___2 *, struct vm_area_struct___2 *); ++ long unsigned int mmap_supported_flags; ++ int (*open)(struct inode___2 *, struct file___2 *); ++ int (*flush)(struct file___2 *, fl_owner_t); ++ int (*release)(struct inode___2 *, struct file___2 *); ++ int (*fsync)(struct file___2 *, loff_t, loff_t, int); ++ int (*fasync)(int, struct file___2 *, int); ++ int (*lock)(struct file___2 *, int, struct file_lock___2 *); ++ ssize_t (*sendpage)(struct file___2 *, struct page___2 *, int, size_t, loff_t *, int); ++ long unsigned int (*get_unmapped_area)(struct file___2 *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ int (*check_flags)(int); ++ int (*flock)(struct file___2 *, int, struct file_lock___2 *); ++ ssize_t (*splice_write)(struct pipe_inode_info___2 *, struct file___2 *, loff_t *, size_t, unsigned int); ++ ssize_t (*splice_read)(struct file___2 *, loff_t *, struct pipe_inode_info___2 *, size_t, unsigned int); ++ int (*setlease)(struct file___2 *, long int, struct file_lock___2 **, void **); ++ long int (*fallocate)(struct file___2 *, int, loff_t, loff_t); ++ void (*show_fdinfo)(struct seq_file___2 *, struct file___2 *); ++ ssize_t (*copy_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, size_t, unsigned int); ++ int (*clone_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, u64); ++ int (*dedupe_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, u64); ++ int (*fadvise)(struct file___2 *, loff_t, loff_t, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct vmacache___2 { ++ u64 seqnum; ++ struct vm_area_struct___2 *vmas[4]; ++}; ++ ++struct page_frag___2 { ++ struct page___2 *page; ++ __u32 offset; ++ __u32 size; ++}; ++ ++struct perf_event___2; ++ ++struct debug_info___2 { ++ int suspended_step; ++ int bps_disabled; ++ int wps_disabled; ++ struct perf_event___2 *hbp_break[16]; ++ struct perf_event___2 *hbp_watch[16]; ++}; ++ ++struct thread_struct___2 { ++ struct cpu_context cpu_context; ++ long: 64; ++ struct { ++ long unsigned int tp_value; ++ long unsigned int tp2_value; ++ struct user_fpsimd_state fpsimd_state; ++ } uw; ++ unsigned int fpsimd_cpu; ++ void *sve_state; ++ unsigned int sve_vl; ++ unsigned int sve_vl_onexec; ++ long unsigned int fault_address; ++ long unsigned int fault_code; ++ struct debug_info___2 debug; ++ long: 64; ++}; ++ ++struct mm_struct___2; ++ ++struct pid___2; ++ ++struct cred___2; ++ ++struct nsproxy___2; ++ ++struct signal_struct___2; ++ ++struct css_set___2; ++ ++struct perf_event_context___2; ++ ++struct vm_struct___2; ++ ++struct task_struct___2 { ++ struct thread_info thread_info; ++ volatile long int state; ++ void *stack; ++ atomic_t usage; ++ unsigned int flags; ++ unsigned int ptrace; ++ struct llist_node wake_entry; ++ int on_cpu; ++ unsigned int cpu; ++ unsigned int wakee_flips; ++ long unsigned int 
wakee_flip_decay_ts; ++ struct task_struct___2 *last_wakee; ++ int recent_used_cpu; ++ int wake_cpu; ++ int on_rq; ++ int prio; ++ int static_prio; ++ int normal_prio; ++ unsigned int rt_priority; ++ const struct sched_class *sched_class; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sched_entity se; ++ struct sched_rt_entity rt; ++ struct task_group *sched_task_group; ++ struct sched_dl_entity dl; ++ struct hlist_head preempt_notifiers; ++ unsigned int btrace_seq; ++ unsigned int policy; ++ int nr_cpus_allowed; ++ cpumask_t cpus_allowed; ++ struct sched_info sched_info; ++ struct list_head tasks; ++ struct plist_node pushable_tasks; ++ struct rb_node pushable_dl_tasks; ++ struct mm_struct___2 *mm; ++ struct mm_struct___2 *active_mm; ++ struct vmacache___2 vmacache; ++ struct task_rss_stat rss_stat; ++ int exit_state; ++ int exit_code; ++ int exit_signal; ++ int pdeath_signal; ++ long unsigned int jobctl; ++ unsigned int personality; ++ unsigned int sched_reset_on_fork: 1; ++ unsigned int sched_contributes_to_load: 1; ++ unsigned int sched_migrated: 1; ++ unsigned int sched_remote_wakeup: 1; ++ int: 28; ++ unsigned int in_execve: 1; ++ unsigned int in_iowait: 1; ++ unsigned int in_user_fault: 1; ++ unsigned int memcg_kmem_skip_account: 1; ++ unsigned int no_cgroup_migration: 1; ++ unsigned int use_memdelay: 1; ++ long unsigned int atomic_flags; ++ struct restart_block restart_block; ++ pid_t pid; ++ pid_t tgid; ++ long unsigned int stack_canary; ++ struct task_struct___2 *real_parent; ++ struct task_struct___2 *parent; ++ struct list_head children; ++ struct list_head sibling; ++ struct task_struct___2 *group_leader; ++ struct list_head ptraced; ++ struct list_head ptrace_entry; ++ struct pid___2 *thread_pid; ++ struct hlist_node pid_links[4]; ++ struct list_head thread_group; ++ struct list_head thread_node; ++ struct completion *vfork_done; ++ int *set_child_tid; ++ int *clear_child_tid; ++ u64 utime; ++ u64 stime; ++ u64 gtime; ++ struct prev_cputime prev_cputime; ++ struct vtime vtime; ++ atomic_t tick_dep_mask; ++ long unsigned int nvcsw; ++ long unsigned int nivcsw; ++ u64 start_time; ++ u64 real_start_time; ++ long unsigned int min_flt; ++ long unsigned int maj_flt; ++ struct task_cputime cputime_expires; ++ struct list_head cpu_timers[3]; ++ const struct cred___2 *ptracer_cred; ++ const struct cred___2 *real_cred; ++ const struct cred___2 *cred; ++ char comm[16]; ++ struct nameidata *nameidata; ++ struct sysv_sem sysvsem; ++ struct sysv_shm sysvshm; ++ long unsigned int last_switch_count; ++ long unsigned int last_switch_time; ++ struct fs_struct *fs; ++ struct files_struct *files; ++ struct nsproxy___2 *nsproxy; ++ struct signal_struct___2 *signal; ++ struct sighand_struct *sighand; ++ sigset_t blocked; ++ sigset_t real_blocked; ++ sigset_t saved_sigmask; ++ struct sigpending pending; ++ long unsigned int sas_ss_sp; ++ size_t sas_ss_size; ++ unsigned int sas_ss_flags; ++ struct callback_head *task_works; ++ struct audit_context *audit_context; ++ kuid_t loginuid; ++ unsigned int sessionid; ++ struct seccomp seccomp; ++ u32 parent_exec_id; ++ u32 self_exec_id; ++ spinlock_t alloc_lock; ++ raw_spinlock_t pi_lock; ++ struct wake_q_node wake_q; ++ struct rb_root_cached pi_waiters; ++ struct task_struct___2 *pi_top_task; ++ struct rt_mutex_waiter *pi_blocked_on; ++ void *journal_info; ++ struct bio_list *bio_list; ++ struct blk_plug *plug; ++ struct reclaim_state *reclaim_state; ++ struct backing_dev_info *backing_dev_info; ++ 
struct io_context *io_context; ++ long unsigned int ptrace_message; ++ siginfo_t *last_siginfo; ++ struct task_io_accounting ioac; ++ u64 acct_rss_mem1; ++ u64 acct_vm_mem1; ++ u64 acct_timexpd; ++ nodemask_t mems_allowed; ++ seqcount_t mems_allowed_seq; ++ int cpuset_mem_spread_rotor; ++ int cpuset_slab_spread_rotor; ++ struct css_set___2 *cgroups; ++ struct list_head cg_list; ++ u32 closid; ++ u32 rmid; ++ struct robust_list_head *robust_list; ++ struct compat_robust_list_head *compat_robust_list; ++ struct list_head pi_state_list; ++ struct futex_pi_state *pi_state_cache; ++ struct perf_event_context___2 *perf_event_ctxp[2]; ++ struct mutex perf_event_mutex; ++ struct list_head perf_event_list; ++ struct mempolicy *mempolicy; ++ short int il_prev; ++ short int pref_node_fork; ++ int numa_scan_seq; ++ unsigned int numa_scan_period; ++ unsigned int numa_scan_period_max; ++ int numa_preferred_nid; ++ long unsigned int numa_migrate_retry; ++ u64 node_stamp; ++ u64 last_task_numa_placement; ++ u64 last_sum_exec_runtime; ++ struct callback_head numa_work; ++ struct numa_group *numa_group; ++ long unsigned int *numa_faults; ++ long unsigned int total_numa_faults; ++ long unsigned int numa_faults_locality[3]; ++ long unsigned int numa_pages_migrated; ++ struct rseq *rseq; ++ u32 rseq_len; ++ u32 rseq_sig; ++ long unsigned int rseq_event_mask; ++ struct tlbflush_unmap_batch tlb_ubc; ++ struct callback_head rcu; ++ struct pipe_inode_info___2 *splice_pipe; ++ struct page_frag___2 task_frag; ++ struct task_delay_info *delays; ++ int nr_dirtied; ++ int nr_dirtied_pause; ++ long unsigned int dirty_paused_when; ++ u64 timer_slack_ns; ++ u64 default_timer_slack_ns; ++ int curr_ret_stack; ++ int curr_ret_depth; ++ struct ftrace_ret_stack *ret_stack; ++ long long unsigned int ftrace_timestamp; ++ atomic_t trace_overrun; ++ atomic_t tracing_graph_pause; ++ long unsigned int trace; ++ long unsigned int trace_recursion; ++ struct mem_cgroup *memcg_in_oom; ++ gfp_t memcg_oom_gfp_mask; ++ int memcg_oom_order; ++ unsigned int memcg_nr_pages_over_high; ++ struct mem_cgroup *active_memcg; ++ struct request_queue *throttle_queue; ++ struct uprobe_task *utask; ++ unsigned int sequential_io; ++ unsigned int sequential_io_avg; ++ int pagefault_disabled; ++ struct task_struct___2 *oom_reaper_list; ++ struct vm_struct___2 *stack_vm_area; ++ atomic_t stack_refcount; ++ int patch_state; ++ void *security; ++ u64 parent_exec_id_u64; ++ u64 self_exec_id_u64; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ struct thread_struct___2 thread; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct hw_perf_event___2 { ++ union { ++ struct { ++ u64 config; ++ u64 last_tag; ++ long unsigned int config_base; ++ long unsigned int event_base; ++ int event_base_rdpmc; ++ int idx; ++ int last_cpu; ++ int flags; ++ struct hw_perf_event_extra extra_reg; ++ struct hw_perf_event_extra branch_reg; ++ }; ++ struct { ++ struct hrtimer hrtimer; ++ }; ++ struct { ++ struct list_head tp_list; ++ }; ++ struct { ++ u64 pwr_acc; ++ u64 ptsc; ++ }; ++ struct { ++ struct arch_hw_breakpoint info; ++ struct list_head bp_list; ++ }; ++ struct { ++ u8 iommu_bank; ++ u8 iommu_cntr; ++ u16 padding; ++ u64 conf; ++ u64 conf1; ++ }; ++ }; ++ struct task_struct___2 *target; ++ void *addr_filters; ++ long unsigned int addr_filters_gen; ++ int 
state; ++ local64_t prev_count; ++ u64 sample_period; ++ u64 last_period; ++ local64_t period_left; ++ u64 interrupts_seq; ++ u64 interrupts; ++ u64 freq_time_stamp; ++ u64 freq_count_stamp; ++}; ++ ++typedef void (*perf_overflow_handler_t___2)(struct perf_event___2 *, struct perf_sample_data *, struct pt_regs *); ++ ++struct pmu___2; ++ ++struct ring_buffer___2; ++ ++struct fasync_struct___2; ++ ++struct pid_namespace___2; ++ ++struct bpf_prog___2; ++ ++struct trace_event_call___2; ++ ++struct perf_cgroup___2; ++ ++struct perf_event___2 { ++ struct list_head event_entry; ++ struct list_head sibling_list; ++ struct list_head active_list; ++ struct rb_node group_node; ++ u64 group_index; ++ struct list_head migrate_entry; ++ struct hlist_node hlist_entry; ++ struct list_head active_entry; ++ int nr_siblings; ++ int event_caps; ++ int group_caps; ++ struct perf_event___2 *group_leader; ++ struct pmu___2 *pmu; ++ void *pmu_private; ++ enum perf_event_state state; ++ unsigned int attach_state; ++ local64_t count; ++ atomic64_t child_count; ++ u64 total_time_enabled; ++ u64 total_time_running; ++ u64 tstamp; ++ u64 shadow_ctx_time; ++ struct perf_event_attr attr; ++ u16 header_size; ++ u16 id_header_size; ++ u16 read_size; ++ struct hw_perf_event___2 hw; ++ struct perf_event_context___2 *ctx; ++ atomic_long_t refcount; ++ atomic64_t child_total_time_enabled; ++ atomic64_t child_total_time_running; ++ struct mutex child_mutex; ++ struct list_head child_list; ++ struct perf_event___2 *parent; ++ int oncpu; ++ int cpu; ++ struct list_head owner_entry; ++ struct task_struct___2 *owner; ++ struct mutex mmap_mutex; ++ atomic_t mmap_count; ++ struct ring_buffer___2 *rb; ++ struct list_head rb_entry; ++ long unsigned int rcu_batches; ++ int rcu_pending; ++ wait_queue_head_t waitq; ++ struct fasync_struct___2 *fasync; ++ int pending_wakeup; ++ int pending_kill; ++ int pending_disable; ++ struct irq_work pending; ++ atomic_t event_limit; ++ struct perf_addr_filters_head addr_filters; ++ struct perf_addr_filter_range *addr_filter_ranges; ++ long unsigned int addr_filters_gen; ++ void (*destroy)(struct perf_event___2 *); ++ struct callback_head callback_head; ++ struct pid_namespace___2 *ns; ++ u64 id; ++ u64 (*clock)(); ++ perf_overflow_handler_t___2 overflow_handler; ++ void *overflow_handler_context; ++ perf_overflow_handler_t___2 orig_overflow_handler; ++ struct bpf_prog___2 *prog; ++ struct trace_event_call___2 *tp_event; ++ struct event_filter *filter; ++ struct ftrace_ops ftrace_ops; ++ struct perf_cgroup___2 *cgrp; ++ struct list_head sb_list; ++}; ++ ++struct dentry_operations___2; ++ ++struct dentry___2 { ++ unsigned int d_flags; ++ seqcount_t d_seq; ++ struct hlist_bl_node d_hash; ++ struct dentry___2 *d_parent; ++ struct qstr d_name; ++ struct inode___2 *d_inode; ++ unsigned char d_iname[32]; ++ struct lockref d_lockref; ++ const struct dentry_operations___2 *d_op; ++ struct super_block___2 *d_sb; ++ long unsigned int d_time; ++ void *d_fsdata; ++ union { ++ struct list_head d_lru; ++ wait_queue_head_t *d_wait; ++ }; ++ struct list_head d_child; ++ struct list_head d_subdirs; ++ union { ++ struct hlist_node d_alias; ++ struct hlist_bl_node d_in_lookup_hash; ++ struct callback_head d_rcu; ++ } d_u; ++ atomic_t d_neg_dnum; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct rw_semaphore___3 { ++ atomic_long_t count; ++ struct list_head wait_list; ++ raw_spinlock_t wait_lock; ++ struct optimistic_spin_queue osq; ++ struct task_struct___2 *owner; ++}; ++ 
++struct address_space_operations___2; ++ ++struct address_space___2 { ++ struct inode___2 *host; ++ struct radix_tree_root i_pages; ++ atomic_t i_mmap_writable; ++ struct rb_root_cached i_mmap; ++ struct rw_semaphore___3 i_mmap_rwsem; ++ long unsigned int nrpages; ++ long unsigned int nrexceptional; ++ long unsigned int writeback_index; ++ const struct address_space_operations___2 *a_ops; ++ long unsigned int flags; ++ spinlock_t private_lock; ++ gfp_t gfp_mask; ++ struct list_head private_list; ++ void *private_data; ++ errseq_t wb_err; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct inode_operations___2; ++ ++struct block_device___2; ++ ++struct inode___2 { ++ umode_t i_mode; ++ short unsigned int i_opflags; ++ kuid_t i_uid; ++ kgid_t i_gid; ++ unsigned int i_flags; ++ struct posix_acl *i_acl; ++ struct posix_acl *i_default_acl; ++ const struct inode_operations___2 *i_op; ++ struct super_block___2 *i_sb; ++ struct address_space___2 *i_mapping; ++ void *i_security; ++ long unsigned int i_ino; ++ union { ++ const unsigned int i_nlink; ++ unsigned int __i_nlink; ++ }; ++ dev_t i_rdev; ++ loff_t i_size; ++ struct timespec64 i_atime; ++ struct timespec64 i_mtime; ++ struct timespec64 i_ctime; ++ spinlock_t i_lock; ++ short unsigned int i_bytes; ++ u8 i_blkbits; ++ u8 i_write_hint; ++ blkcnt_t i_blocks; ++ long unsigned int i_state; ++ struct rw_semaphore___3 i_rwsem; ++ long unsigned int dirtied_when; ++ long unsigned int dirtied_time_when; ++ struct hlist_node i_hash; ++ struct list_head i_io_list; ++ struct bdi_writeback *i_wb; ++ int i_wb_frn_winner; ++ u16 i_wb_frn_avg_time; ++ u16 i_wb_frn_history; ++ struct list_head i_lru; ++ struct list_head i_sb_list; ++ struct list_head i_wb_list; ++ union { ++ struct hlist_head i_dentry; ++ struct callback_head i_rcu; ++ }; ++ atomic64_t i_version; ++ atomic_t i_count; ++ atomic_t i_dio_count; ++ atomic_t i_writecount; ++ const struct file_operations___2 *i_fop; ++ struct file_lock_context *i_flctx; ++ struct address_space___2 i_data; ++ struct list_head i_devices; ++ union { ++ struct pipe_inode_info___2 *i_pipe; ++ struct block_device___2 *i_bdev; ++ struct cdev *i_cdev; ++ char *i_link; ++ unsigned int i_dir_seq; ++ }; ++ __u32 i_generation; ++ __u32 i_fsnotify_mask; ++ struct fsnotify_mark_connector *i_fsnotify_marks; ++ void *i_private; ++ atomic64_t i_sequence; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct vfsmount___2; ++ ++struct path___2; ++ ++struct dentry_operations___2 { ++ int (*d_revalidate)(struct dentry___2 *, unsigned int); ++ int (*d_weak_revalidate)(struct dentry___2 *, unsigned int); ++ int (*d_hash)(const struct dentry___2 *, struct qstr *); ++ int (*d_compare)(const struct dentry___2 *, unsigned int, const char *, const struct qstr *); ++ int (*d_delete)(const struct dentry___2 *); ++ int (*d_init)(struct dentry___2 *); ++ void (*d_release)(struct dentry___2 *); ++ void (*d_prune)(struct dentry___2 *); ++ void (*d_iput)(struct dentry___2 *, struct inode___2 *); ++ char * (*d_dname)(struct dentry___2 *, char *, int); ++ struct vfsmount___2 * (*d_automount)(struct path___2 *); ++ int (*d_manage)(const struct path___2 *, bool); ++ struct dentry___2 * (*d_real)(struct dentry___2 *, const struct inode___2 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ 
long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct quota_format_type___2; ++ ++struct mem_dqinfo___2 { ++ struct quota_format_type___2 *dqi_format; ++ int dqi_fmt_id; ++ struct list_head dqi_dirty_list; ++ long unsigned int dqi_flags; ++ unsigned int dqi_bgrace; ++ unsigned int dqi_igrace; ++ qsize_t dqi_max_spc_limit; ++ qsize_t dqi_max_ino_limit; ++ void *dqi_priv; ++}; ++ ++struct quota_format_ops___2; ++ ++struct quota_info___2 { ++ unsigned int flags; ++ struct rw_semaphore___3 dqio_sem; ++ struct inode___2 *files[3]; ++ struct mem_dqinfo___2 info[3]; ++ const struct quota_format_ops___2 *ops[3]; ++}; ++ ++struct rcuwait___2 { ++ struct task_struct___2 *task; ++}; ++ ++struct percpu_rw_semaphore___2 { ++ struct rcu_sync rss; ++ unsigned int *read_count; ++ struct rw_semaphore___3 rw_sem; ++ struct rcuwait___2 writer; ++ int readers_block; ++}; ++ ++struct sb_writers___2 { ++ int frozen; ++ wait_queue_head_t wait_unfrozen; ++ struct percpu_rw_semaphore___2 rw_sem[3]; ++}; ++ ++struct file_system_type___2; ++ ++struct super_operations___2; ++ ++struct dquot_operations___2; ++ ++struct quotactl_ops___2; ++ ++struct user_namespace___2; ++ ++struct super_block___2 { ++ struct list_head s_list; ++ dev_t s_dev; ++ unsigned char s_blocksize_bits; ++ long unsigned int s_blocksize; ++ loff_t s_maxbytes; ++ struct file_system_type___2 *s_type; ++ const struct super_operations___2 *s_op; ++ const struct dquot_operations___2 *dq_op; ++ const struct quotactl_ops___2 *s_qcop; ++ const struct export_operations *s_export_op; ++ long unsigned int s_flags; ++ long unsigned int s_iflags; ++ long unsigned int s_magic; ++ struct dentry___2 *s_root; ++ struct rw_semaphore___3 s_umount; ++ int s_count; ++ atomic_t s_active; ++ void *s_security; ++ const struct xattr_handler **s_xattr; ++ struct hlist_bl_head s_roots; ++ struct list_head s_mounts; ++ struct block_device___2 *s_bdev; ++ struct backing_dev_info *s_bdi; ++ struct mtd_info *s_mtd; ++ struct hlist_node s_instances; ++ unsigned int s_quota_types; ++ struct quota_info___2 s_dquot; ++ struct sb_writers___2 s_writers; ++ char s_id[32]; ++ uuid_t s_uuid; ++ void *s_fs_info; ++ unsigned int s_max_links; ++ fmode_t s_mode; ++ u32 s_time_gran; ++ struct mutex s_vfs_rename_mutex; ++ char *s_subtype; ++ const struct dentry_operations___2 *s_d_op; ++ int cleancache_poolid; ++ struct shrinker s_shrink; ++ atomic_long_t s_remove_count; ++ atomic_long_t s_fsnotify_inode_refs; ++ int s_readonly_remount; ++ struct workqueue_struct *s_dio_done_wq; ++ struct hlist_head s_pins; ++ struct user_namespace___2 *s_user_ns; ++ struct list_lru s_dentry_lru; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct list_lru s_inode_lru; ++ struct callback_head rcu; ++ struct work_struct destroy_work; ++ struct mutex s_sync_lock; ++ int s_stack_depth; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t s_inode_list_lock; ++ struct list_head s_inodes; ++ spinlock_t s_inode_wblist_lock; ++ struct list_head s_inodes_wb; ++ long: 64; ++ long: 64; ++}; ++ ++struct vfsmount___2 { ++ struct dentry___2 *mnt_root; ++ struct super_block___2 *mnt_sb; ++ int mnt_flags; ++}; ++ ++struct path___2 { ++ struct vfsmount___2 *mnt; ++ struct dentry___2 *dentry; ++}; ++ ++struct upid___2 { ++ int nr; ++ struct pid_namespace___2 *ns; ++}; ++ ++struct proc_ns_operations___2; ++ ++struct ns_common___2 { ++ atomic_long_t stashed; ++ const struct proc_ns_operations___2 *ops; ++ unsigned int inum; ++}; ++ ++struct ucounts___2; ++ ++struct 
pid_namespace___2 { ++ struct kref kref; ++ struct idr idr; ++ struct callback_head rcu; ++ unsigned int pid_allocated; ++ struct task_struct___2 *child_reaper; ++ struct kmem_cache *pid_cachep; ++ unsigned int level; ++ struct pid_namespace___2 *parent; ++ struct vfsmount___2 *proc_mnt; ++ struct dentry___2 *proc_self; ++ struct dentry___2 *proc_thread_self; ++ struct fs_pin *bacct; ++ struct user_namespace___2 *user_ns; ++ struct ucounts___2 *ucounts; ++ struct work_struct proc_work; ++ kgid_t pid_gid; ++ int hide_pid; ++ int pid_max; ++ int reboot; ++ struct ns_common___2 ns; ++}; ++ ++struct pid___2 { ++ atomic_t count; ++ unsigned int level; ++ struct hlist_head tasks[4]; ++ struct callback_head rcu; ++ struct upid___2 numbers[1]; ++}; ++ ++struct key___2; ++ ++struct user_namespace___2 { ++ struct uid_gid_map uid_map; ++ struct uid_gid_map gid_map; ++ struct uid_gid_map projid_map; ++ atomic_t count; ++ struct user_namespace___2 *parent; ++ int level; ++ kuid_t owner; ++ kgid_t group; ++ struct ns_common___2 ns; ++ long unsigned int flags; ++ struct key___2 *persistent_keyring_register; ++ struct rw_semaphore___3 persistent_keyring_register_sem; ++ struct work_struct work; ++ struct ctl_table_set set; ++ struct ctl_table_header *sysctls; ++ struct ucounts___2 *ucounts; ++ int ucount_max[9]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct secondary_data___2 { ++ void *stack; ++ struct task_struct___2 *task; ++ long int status; ++}; ++ ++struct clock_event_device___2; ++ ++struct arch_timer_erratum_workaround___2 { ++ enum arch_timer_erratum_match_type match_type; ++ const void *id; ++ const char *desc; ++ u32 (*read_cntp_tval_el0)(); ++ u32 (*read_cntv_tval_el0)(); ++ u64 (*read_cntpct_el0)(); ++ u64 (*read_cntvct_el0)(); ++ int (*set_next_event_phys)(long unsigned int, struct clock_event_device___2 *); ++ int (*set_next_event_virt)(long unsigned int, struct clock_event_device___2 *); ++}; ++ ++struct clock_event_device___2 { ++ void (*event_handler)(struct clock_event_device___2 *); ++ int (*set_next_event)(long unsigned int, struct clock_event_device___2 *); ++ int (*set_next_ktime)(ktime_t, struct clock_event_device___2 *); ++ ktime_t next_event; ++ u64 max_delta_ns; ++ u64 min_delta_ns; ++ u32 mult; ++ u32 shift; ++ enum clock_event_state state_use_accessors; ++ unsigned int features; ++ long unsigned int retries; ++ int (*set_state_periodic)(struct clock_event_device___2 *); ++ int (*set_state_oneshot)(struct clock_event_device___2 *); ++ int (*set_state_oneshot_stopped)(struct clock_event_device___2 *); ++ int (*set_state_shutdown)(struct clock_event_device___2 *); ++ int (*tick_resume)(struct clock_event_device___2 *); ++ void (*broadcast)(const struct cpumask *); ++ void (*suspend)(struct clock_event_device___2 *); ++ void (*resume)(struct clock_event_device___2 *); ++ long unsigned int min_delta_ticks; ++ long unsigned int max_delta_ticks; ++ const char *name; ++ int rating; ++ int irq; ++ int bound_on; ++ const struct cpumask *cpumask; ++ struct list_head list; ++ struct module___2 *owner; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++typedef struct page___2 *pgtable_t___2; ++ ++struct dev_pagemap___2; ++ ++struct page___2 { ++ long unsigned int flags; ++ union { ++ struct { ++ struct list_head lru; ++ struct address_space___2 *mapping; ++ long unsigned int index; ++ long unsigned int private; ++ }; ++ struct { ++ 
union { ++ struct list_head slab_list; ++ struct { ++ struct page___2 *next; ++ int pages; ++ int pobjects; ++ }; ++ }; ++ struct kmem_cache *slab_cache; ++ void *freelist; ++ union { ++ void *s_mem; ++ long unsigned int counters; ++ struct { ++ unsigned int inuse: 16; ++ unsigned int objects: 15; ++ unsigned int frozen: 1; ++ }; ++ }; ++ }; ++ struct { ++ long unsigned int compound_head; ++ unsigned char compound_dtor; ++ unsigned char compound_order; ++ atomic_t compound_mapcount; ++ }; ++ struct { ++ long unsigned int _compound_pad_1; ++ long unsigned int _compound_pad_2; ++ struct list_head deferred_list; ++ }; ++ struct { ++ long unsigned int _pt_pad_1; ++ pgtable_t___2 pmd_huge_pte; ++ long unsigned int _pt_pad_2; ++ union { ++ struct mm_struct___2 *pt_mm; ++ atomic_t pt_frag_refcount; ++ }; ++ spinlock_t ptl; ++ }; ++ struct { ++ struct dev_pagemap___2 *pgmap; ++ long unsigned int hmm_data; ++ long unsigned int _zd_pad_1; ++ }; ++ struct callback_head callback_head; ++ }; ++ union { ++ atomic_t _mapcount; ++ unsigned int page_type; ++ unsigned int active; ++ int units; ++ }; ++ atomic_t _refcount; ++ struct mem_cgroup *mem_cgroup; ++}; ++ ++struct user_struct___2 { ++ refcount_t __count; ++ atomic_t processes; ++ atomic_t sigpending; ++ atomic_t fanotify_listeners; ++ atomic_long_t epoll_watches; ++ long unsigned int mq_bytes; ++ long unsigned int locked_shm; ++ long unsigned int unix_inflight; ++ atomic_long_t pipe_bufs; ++ struct key___2 *uid_keyring; ++ struct key___2 *session_keyring; ++ struct hlist_node uidhash_node; ++ kuid_t uid; ++ atomic_long_t locked_vm; ++ struct ratelimit_state ratelimit; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct vm_operations_struct___2; ++ ++struct vm_area_struct___2 { ++ long unsigned int vm_start; ++ long unsigned int vm_end; ++ struct vm_area_struct___2 *vm_next; ++ struct vm_area_struct___2 *vm_prev; ++ struct rb_node vm_rb; ++ long unsigned int rb_subtree_gap; ++ struct mm_struct___2 *vm_mm; ++ pgprot_t vm_page_prot; ++ long unsigned int vm_flags; ++ struct { ++ struct rb_node rb; ++ long unsigned int rb_subtree_last; ++ } shared; ++ struct list_head anon_vma_chain; ++ struct anon_vma *anon_vma; ++ const struct vm_operations_struct___2 *vm_ops; ++ long unsigned int vm_pgoff; ++ struct file___2 *vm_file; ++ void *vm_private_data; ++ atomic_long_t swap_readahead_info; ++ struct mempolicy *vm_policy; ++ struct vm_userfaultfd_ctx vm_userfaultfd_ctx; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct core_state___2; ++ ++struct mm_struct___2 { ++ struct { ++ struct vm_area_struct___2 *mmap; ++ struct rb_root mm_rb; ++ u64 vmacache_seqnum; ++ long unsigned int (*get_unmapped_area)(struct file___2 *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ long unsigned int mmap_base; ++ long unsigned int mmap_legacy_base; ++ long unsigned int task_size; ++ long unsigned int highest_vm_end; ++ pgd_t *pgd; ++ atomic_t membarrier_state; ++ atomic_t mm_users; ++ atomic_t mm_count; ++ atomic_long_t pgtables_bytes; ++ int map_count; ++ spinlock_t page_table_lock; ++ struct rw_semaphore___3 mmap_sem; ++ struct list_head mmlist; ++ long unsigned int hiwater_rss; ++ long unsigned int hiwater_vm; ++ long unsigned int total_vm; ++ atomic_long_t locked_vm; ++ long unsigned int pinned_vm; ++ long unsigned int data_vm; ++ long unsigned int exec_vm; ++ long unsigned int stack_vm; ++ 
long unsigned int def_flags; ++ spinlock_t arg_lock; ++ long unsigned int start_code; ++ long unsigned int end_code; ++ long unsigned int start_data; ++ long unsigned int end_data; ++ long unsigned int start_brk; ++ long unsigned int brk; ++ long unsigned int start_stack; ++ long unsigned int arg_start; ++ long unsigned int arg_end; ++ long unsigned int env_start; ++ long unsigned int env_end; ++ long unsigned int saved_auxv[46]; ++ struct mm_rss_stat rss_stat; ++ struct linux_binfmt *binfmt; ++ mm_context_t context; ++ long unsigned int flags; ++ struct core_state___2 *core_state; ++ spinlock_t ioctx_lock; ++ struct kioctx_table *ioctx_table; ++ struct task_struct___2 *owner; ++ struct user_namespace___2 *user_ns; ++ struct file___2 *exe_file; ++ struct mmu_notifier_mm *mmu_notifier_mm; ++ pgtable_t___2 pmd_huge_pte; ++ long unsigned int numa_next_scan; ++ long unsigned int numa_scan_offset; ++ int numa_scan_seq; ++ atomic_t tlb_flush_pending; ++ struct uprobes_state uprobes_state; ++ atomic_long_t hugetlb_usage; ++ struct work_struct async_put_work; ++ }; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int cpu_bitmap[0]; ++}; ++ ++struct cred___2 { ++ atomic_t usage; ++ kuid_t uid; ++ kgid_t gid; ++ kuid_t suid; ++ kgid_t sgid; ++ kuid_t euid; ++ kgid_t egid; ++ kuid_t fsuid; ++ kgid_t fsgid; ++ unsigned int securebits; ++ kernel_cap_t cap_inheritable; ++ kernel_cap_t cap_permitted; ++ kernel_cap_t cap_effective; ++ kernel_cap_t cap_bset; ++ kernel_cap_t cap_ambient; ++ unsigned char jit_keyring; ++ struct key___2 *session_keyring; ++ struct key___2 *process_keyring; ++ struct key___2 *thread_keyring; ++ struct key___2 *request_key_auth; ++ void *security; ++ struct user_struct___2 *user; ++ struct user_namespace___2 *user_ns; ++ struct group_info *group_info; ++ union { ++ int non_rcu; ++ struct callback_head rcu; ++ }; ++}; ++ ++struct net___2; ++ ++struct cgroup_namespace___2; ++ ++struct nsproxy___2 { ++ atomic_t count; ++ struct uts_namespace *uts_ns; ++ struct ipc_namespace *ipc_ns; ++ struct mnt_namespace *mnt_ns; ++ struct pid_namespace___2 *pid_ns_for_children; ++ struct net___2 *net_ns; ++ struct cgroup_namespace___2 *cgroup_ns; ++}; ++ ++struct signal_struct___2 { ++ atomic_t sigcnt; ++ atomic_t live; ++ int nr_threads; ++ struct list_head thread_head; ++ wait_queue_head_t wait_chldexit; ++ struct task_struct___2 *curr_target; ++ struct sigpending shared_pending; ++ struct hlist_head multiprocess; ++ int group_exit_code; ++ int notify_count; ++ struct task_struct___2 *group_exit_task; ++ int group_stop_count; ++ unsigned int flags; ++ unsigned int is_child_subreaper: 1; ++ unsigned int has_child_subreaper: 1; ++ int posix_timer_id; ++ struct list_head posix_timers; ++ struct hrtimer real_timer; ++ ktime_t it_real_incr; ++ struct cpu_itimer it[2]; ++ struct thread_group_cputimer cputimer; ++ struct task_cputime cputime_expires; ++ struct list_head cpu_timers[3]; ++ struct pid___2 *pids[4]; ++ atomic_t tick_dep_mask; ++ struct pid___2 *tty_old_pgrp; ++ int leader; ++ struct tty_struct *tty; ++ struct autogroup *autogroup; ++ seqlock_t stats_lock; ++ u64 utime; ++ u64 stime; ++ u64 cutime; ++ u64 cstime; ++ u64 gtime; ++ u64 cgtime; ++ struct prev_cputime prev_cputime; ++ long unsigned int nvcsw; ++ long unsigned int 
nivcsw; ++ long unsigned int cnvcsw; ++ long unsigned int cnivcsw; ++ long unsigned int min_flt; ++ long unsigned int maj_flt; ++ long unsigned int cmin_flt; ++ long unsigned int cmaj_flt; ++ long unsigned int inblock; ++ long unsigned int oublock; ++ long unsigned int cinblock; ++ long unsigned int coublock; ++ long unsigned int maxrss; ++ long unsigned int cmaxrss; ++ struct task_io_accounting ioac; ++ long long unsigned int sum_sched_runtime; ++ struct rlimit rlim[16]; ++ struct pacct_struct pacct; ++ struct taskstats *stats; ++ unsigned int audit_tty; ++ struct tty_audit_buf *tty_audit_buf; ++ bool oom_flag_origin; ++ short int oom_score_adj; ++ short int oom_score_adj_min; ++ struct mm_struct___2 *oom_mm; ++ struct mutex cred_guard_mutex; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct cgroup_subsys_state___2; ++ ++struct cgroup___2; ++ ++struct css_set___2 { ++ struct cgroup_subsys_state___2 *subsys[14]; ++ refcount_t refcount; ++ struct css_set___2 *dom_cset; ++ struct cgroup___2 *dfl_cgrp; ++ int nr_tasks; ++ struct list_head tasks; ++ struct list_head mg_tasks; ++ struct list_head dying_tasks; ++ struct list_head task_iters; ++ struct list_head e_cset_node[14]; ++ struct list_head threaded_csets; ++ struct list_head threaded_csets_node; ++ struct hlist_node hlist; ++ struct list_head cgrp_links; ++ struct list_head mg_preload_node; ++ struct list_head mg_node; ++ struct cgroup___2 *mg_src_cgrp; ++ struct cgroup___2 *mg_dst_cgrp; ++ struct css_set___2 *mg_dst_cset; ++ bool dead; ++ struct callback_head callback_head; ++}; ++ ++struct perf_event_context___2 { ++ struct pmu___2 *pmu; ++ raw_spinlock_t lock; ++ struct mutex mutex; ++ struct list_head active_ctx_list; ++ struct perf_event_groups pinned_groups; ++ struct perf_event_groups flexible_groups; ++ struct list_head event_list; ++ struct list_head pinned_active; ++ struct list_head flexible_active; ++ int nr_events; ++ int nr_active; ++ int is_active; ++ int nr_stat; ++ int nr_freq; ++ int rotate_disable; ++ atomic_t refcount; ++ struct task_struct___2 *task; ++ u64 time; ++ u64 timestamp; ++ struct perf_event_context___2 *parent_ctx; ++ u64 parent_gen; ++ u64 generation; ++ int pin_count; ++ int nr_cgroups; ++ void *task_ctx_data; ++ struct callback_head callback_head; ++}; ++ ++struct pipe_buffer___2; ++ ++struct pipe_inode_info___2 { ++ struct mutex mutex; ++ wait_queue_head_t wait; ++ unsigned int nrbufs; ++ unsigned int curbuf; ++ unsigned int buffers; ++ unsigned int readers; ++ unsigned int writers; ++ unsigned int files; ++ unsigned int waiting_writers; ++ unsigned int r_counter; ++ unsigned int w_counter; ++ struct page___2 *tmp_page; ++ struct fasync_struct___2 *fasync_readers; ++ struct fasync_struct___2 *fasync_writers; ++ struct pipe_buffer___2 *bufs; ++ struct user_struct___2 *user; ++}; ++ ++struct vm_struct___2 { ++ struct vm_struct___2 *next; ++ void *addr; ++ long unsigned int size; ++ long unsigned int flags; ++ struct page___2 **pages; ++ unsigned int nr_pages; ++ phys_addr_t phys_addr; ++ const void *caller; ++}; ++ ++union thread_union___2 { ++ struct task_struct___2 task; ++ long unsigned int stack[8192]; ++}; ++ ++typedef int (*dev_page_fault_t___2)(struct vm_area_struct___2 *, long unsigned int, const struct page___2 *, unsigned int, pmd_t *); ++ ++typedef void (*dev_page_free_t___2)(struct page___2 *, void *); ++ ++struct device___2; ++ ++struct dev_pagemap___2 { ++ dev_page_fault_t___2 
page_fault; ++ dev_page_free_t___2 page_free; ++ struct vmem_altmap altmap; ++ bool altmap_valid; ++ struct resource res; ++ struct percpu_ref *ref; ++ void (*kill)(struct percpu_ref *); ++ struct device___2 *dev; ++ void *data; ++ enum memory_type type; ++}; ++ ++struct fown_struct___2 { ++ rwlock_t lock; ++ struct pid___2 *pid; ++ enum pid_type pid_type; ++ kuid_t uid; ++ kuid_t euid; ++ int signum; ++}; ++ ++struct file___2 { ++ union { ++ struct llist_node fu_llist; ++ struct callback_head fu_rcuhead; ++ } f_u; ++ struct path___2 f_path; ++ struct inode___2 *f_inode; ++ const struct file_operations___2 *f_op; ++ spinlock_t f_lock; ++ enum rw_hint f_write_hint; ++ atomic_long_t f_count; ++ unsigned int f_flags; ++ fmode_t f_mode; ++ struct mutex f_pos_lock; ++ loff_t f_pos; ++ struct fown_struct___2 f_owner; ++ const struct cred___2 *f_cred; ++ struct file_ra_state f_ra; ++ u64 f_version; ++ void *f_security; ++ void *private_data; ++ struct list_head f_ep_links; ++ struct list_head f_tfile_llink; ++ struct address_space___2 *f_mapping; ++ errseq_t f_wb_err; ++}; ++ ++struct vm_fault___2; ++ ++struct vm_operations_struct___2 { ++ void (*open)(struct vm_area_struct___2 *); ++ void (*close)(struct vm_area_struct___2 *); ++ int (*split)(struct vm_area_struct___2 *, long unsigned int); ++ int (*mremap)(struct vm_area_struct___2 *); ++ vm_fault_t (*fault)(struct vm_fault___2 *); ++ vm_fault_t (*huge_fault)(struct vm_fault___2 *, enum page_entry_size); ++ void (*map_pages)(struct vm_fault___2 *, long unsigned int, long unsigned int); ++ long unsigned int (*pagesize)(struct vm_area_struct___2 *); ++ vm_fault_t (*page_mkwrite)(struct vm_fault___2 *); ++ vm_fault_t (*pfn_mkwrite)(struct vm_fault___2 *); ++ int (*access)(struct vm_area_struct___2 *, long unsigned int, void *, int, int); ++ const char * (*name)(struct vm_area_struct___2 *); ++ int (*set_policy)(struct vm_area_struct___2 *, struct mempolicy *); ++ struct mempolicy * (*get_policy)(struct vm_area_struct___2 *, long unsigned int); ++ struct page___2 * (*find_special_page)(struct vm_area_struct___2 *, long unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct core_thread___2 { ++ struct task_struct___2 *task; ++ struct core_thread___2 *next; ++}; ++ ++struct core_state___2 { ++ atomic_t nr_threads; ++ struct core_thread___2 dumper; ++ struct completion startup; ++}; ++ ++struct vm_fault___2 { ++ struct vm_area_struct___2 *vma; ++ unsigned int flags; ++ gfp_t gfp_mask; ++ long unsigned int pgoff; ++ long unsigned int address; ++ pmd_t *pmd; ++ pud_t *pud; ++ pte_t orig_pte; ++ struct page___2 *cow_page; ++ struct mem_cgroup *memcg; ++ struct page___2 *page; ++ pte_t *pte; ++ spinlock_t *ptl; ++ pgtable_t___2 prealloc_pte; ++}; ++ ++struct kiocb___2 { ++ struct file___2 *ki_filp; ++ loff_t ki_pos; ++ void (*ki_complete)(struct kiocb___2 *, long int, long int); ++ void *private; ++ int ki_flags; ++ u16 ki_hint; ++ u16 ki_ioprio; ++}; ++ ++struct iattr___2 { ++ unsigned int ia_valid; ++ umode_t ia_mode; ++ kuid_t ia_uid; ++ kgid_t ia_gid; ++ loff_t ia_size; ++ struct timespec64 ia_atime; ++ struct timespec64 ia_mtime; ++ struct timespec64 ia_ctime; ++ struct file___2 *ia_file; ++}; ++ ++struct pglist_data___2; ++ ++struct lruvec___2 { ++ struct list_head lists[5]; ++ struct zone_reclaim_stat reclaim_stat; ++ atomic_long_t inactive_age; ++ long unsigned int refaults; ++ struct pglist_data___2 *pgdat; ++}; ++ ++struct 
zone___2 { ++ long unsigned int watermark[3]; ++ long unsigned int nr_reserved_highatomic; ++ long int lowmem_reserve[3]; ++ int node; ++ struct pglist_data___2 *zone_pgdat; ++ struct per_cpu_pageset *pageset; ++ long unsigned int zone_start_pfn; ++ long unsigned int managed_pages; ++ long unsigned int spanned_pages; ++ long unsigned int present_pages; ++ const char *name; ++ long unsigned int nr_isolate_pageblock; ++ seqlock_t span_seqlock; ++ int initialized; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad1_; ++ struct free_area free_area[14]; ++ long unsigned int flags; ++ spinlock_t lock; ++ int: 32; ++ struct zone_padding _pad2_; ++ long unsigned int percpu_drift_mark; ++ long unsigned int compact_cached_free_pfn; ++ long unsigned int compact_cached_migrate_pfn[2]; ++ unsigned int compact_considered; ++ unsigned int compact_defer_shift; ++ int compact_order_failed; ++ bool compact_blockskip_flush; ++ bool contiguous; ++ long: 16; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad3_; ++ atomic_long_t vm_stat[13]; ++ atomic_long_t vm_numa_stat[6]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct zoneref___2 { ++ struct zone___2 *zone; ++ int zone_idx; ++}; ++ ++struct zonelist___2 { ++ struct zoneref___2 _zonerefs[49]; ++}; ++ ++struct pglist_data___2 { ++ struct zone___2 node_zones[3]; ++ struct zonelist___2 node_zonelists[2]; ++ int nr_zones; ++ spinlock_t node_size_lock; ++ long unsigned int node_start_pfn; ++ long unsigned int node_present_pages; ++ long unsigned int node_spanned_pages; ++ int node_id; ++ wait_queue_head_t kswapd_wait; ++ wait_queue_head_t pfmemalloc_wait; ++ struct task_struct___2 *kswapd; ++ int kswapd_order; ++ enum zone_type kswapd_classzone_idx; ++ int kswapd_failures; ++ int kcompactd_max_order; ++ enum zone_type kcompactd_classzone_idx; ++ wait_queue_head_t kcompactd_wait; ++ struct task_struct___2 *kcompactd; ++ long unsigned int totalreserve_pages; ++ long unsigned int min_unmapped_pages; ++ long unsigned int min_slab_pages; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad1_; ++ spinlock_t lru_lock; ++ spinlock_t split_queue_lock; ++ struct list_head split_queue; ++ long unsigned int split_queue_len; ++ struct lruvec___2 lruvec; ++ long unsigned int flags; ++ long: 64; ++ long: 64; ++ struct zone_padding _pad2_; ++ struct per_cpu_nodestat *per_cpu_nodestats; ++ atomic_long_t vm_stat[28]; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long: 64; ++}; ++ ++typedef struct pglist_data___2 pg_data_t___2; ++ ++struct blocking_notifier_head___2 { ++ struct rw_semaphore___3 rwsem; ++ struct notifier_block *head; ++}; ++ ++struct dquot___2 { ++ struct hlist_node dq_hash; ++ struct list_head dq_inuse; ++ struct list_head dq_free; ++ struct list_head dq_dirty; ++ struct mutex dq_lock; ++ spinlock_t dq_dqb_lock; ++ atomic_t dq_count; ++ struct super_block___2 *dq_sb; ++ struct kqid dq_id; ++ loff_t dq_off; ++ long unsigned int dq_flags; ++ struct mem_dqblk dq_dqb; ++}; ++ ++struct quota_format_type___2 { ++ int qf_fmt_id; ++ const struct quota_format_ops___2 *qf_ops; ++ struct module___2 *qf_owner; ++ struct quota_format_type___2 *qf_next; ++}; ++ ++struct quota_format_ops___2 { ++ int (*check_quota_file)(struct super_block___2 *, int); ++ int (*read_file_info)(struct super_block___2 *, int); ++ int (*write_file_info)(struct 
super_block___2 *, int); ++ int (*free_file_info)(struct super_block___2 *, int); ++ int (*read_dqblk)(struct dquot___2 *); ++ int (*commit_dqblk)(struct dquot___2 *); ++ int (*release_dqblk)(struct dquot___2 *); ++ int (*get_next_id)(struct super_block___2 *, struct kqid *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct dquot_operations___2 { ++ int (*write_dquot)(struct dquot___2 *); ++ struct dquot___2 * (*alloc_dquot)(struct super_block___2 *, int); ++ void (*destroy_dquot)(struct dquot___2 *); ++ int (*acquire_dquot)(struct dquot___2 *); ++ int (*release_dquot)(struct dquot___2 *); ++ int (*mark_dirty)(struct dquot___2 *); ++ int (*write_info)(struct super_block___2 *, int); ++ qsize_t * (*get_reserved_space)(struct inode___2 *); ++ int (*get_projid)(struct inode___2 *, kprojid_t *); ++ int (*get_inode_usage)(struct inode___2 *, qsize_t *); ++ int (*get_next_id)(struct super_block___2 *, struct kqid *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct quotactl_ops___2 { ++ int (*quota_on)(struct super_block___2 *, int, int, const struct path___2 *); ++ int (*quota_off)(struct super_block___2 *, int); ++ int (*quota_enable)(struct super_block___2 *, unsigned int); ++ int (*quota_disable)(struct super_block___2 *, unsigned int); ++ int (*quota_sync)(struct super_block___2 *, int); ++ int (*set_info)(struct super_block___2 *, int, struct qc_info *); ++ int (*get_dqblk)(struct super_block___2 *, struct kqid, struct qc_dqblk *); ++ int (*get_nextdqblk)(struct super_block___2 *, struct kqid *, struct qc_dqblk *); ++ int (*set_dqblk)(struct super_block___2 *, struct kqid, struct qc_dqblk *); ++ int (*get_state)(struct super_block___2 *, struct qc_state *); ++ int (*rm_xquota)(struct super_block___2 *, unsigned int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kset___2; ++ ++struct kobj_type___2; ++ ++struct kernfs_node___2; ++ ++struct kobject___3 { ++ const char *name; ++ struct list_head entry; ++ struct kobject___3 *parent; ++ struct kset___2 *kset; ++ struct kobj_type___2 *ktype; ++ struct kernfs_node___2 *sd; ++ struct kref kref; ++ unsigned int state_initialized: 1; ++ unsigned int state_in_sysfs: 1; ++ unsigned int state_add_uevent_sent: 1; ++ unsigned int state_remove_uevent_sent: 1; ++ unsigned int uevent_suppress: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct module_kobject___2 { ++ struct kobject___3 kobj; ++ struct module___2 *mod; ++ struct kobject___3 *drivers_dir; ++ struct module_param_attrs *mp; ++ struct completion *kobj_completion; ++}; ++ ++struct mod_tree_node___2 { ++ struct module___2 *mod; ++ struct latch_tree_node node; ++}; ++ ++struct module_layout___2 { ++ void *base; ++ unsigned int size; ++ unsigned int text_size; ++ unsigned int ro_size; ++ unsigned int ro_after_init_size; ++ struct mod_tree_node___2 mtn; ++}; ++ ++struct module_attribute___2; ++ ++struct kernel_param___2; ++ ++struct module___2 { ++ enum module_state state; ++ struct list_head list; ++ char name[56]; ++ struct module_kobject___2 mkobj; ++ struct module_attribute___2 *modinfo_attrs; ++ const char *version; ++ const char *srcversion; ++ struct kobject___3 *holders_dir; ++ const struct kernel_symbol *syms; ++ const s32 *crcs; ++ unsigned int num_syms; ++ struct mutex 
param_lock; ++ struct kernel_param___2 *kp; ++ unsigned int num_kp; ++ unsigned int num_gpl_syms; ++ const struct kernel_symbol *gpl_syms; ++ const s32 *gpl_crcs; ++ bool sig_ok; ++ bool async_probe_requested; ++ const struct kernel_symbol *gpl_future_syms; ++ const s32 *gpl_future_crcs; ++ unsigned int num_gpl_future_syms; ++ unsigned int num_exentries; ++ struct exception_table_entry *extable; ++ int (*init)(); ++ long: 64; ++ struct module_layout___2 core_layout; ++ struct module_layout___2 init_layout; ++ struct mod_arch_specific arch; ++ long unsigned int taints; ++ unsigned int num_bugs; ++ struct list_head bug_list; ++ struct bug_entry *bug_table; ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; ++ struct module_sect_attrs *sect_attrs; ++ struct module_notes_attrs *notes_attrs; ++ char *args; ++ void *percpu; ++ unsigned int percpu_size; ++ unsigned int num_tracepoints; ++ tracepoint_ptr_t *tracepoints_ptrs; ++ unsigned int num_bpf_raw_events; ++ struct bpf_raw_event_map *bpf_raw_events; ++ struct jump_entry *jump_entries; ++ unsigned int num_jump_entries; ++ unsigned int num_trace_bprintk_fmt; ++ const char **trace_bprintk_fmt_start; ++ struct trace_event_call___2 **trace_events; ++ unsigned int num_trace_events; ++ struct trace_eval_map **trace_evals; ++ unsigned int num_trace_evals; ++ unsigned int num_ftrace_callsites; ++ long unsigned int *ftrace_callsites; ++ bool klp; ++ bool klp_alive; ++ struct klp_modinfo *klp_info; ++ struct list_head source_list; ++ struct list_head target_list; ++ void (*exit)(); ++ atomic_t refcnt; ++ union { ++ enum MODULE_KLP_REL_STATE klp_rel_state; ++ long int klp_rel_state_KABI; ++ }; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct address_space_operations___2 { ++ int (*writepage)(struct page___2 *, struct writeback_control *); ++ int (*readpage)(struct file___2 *, struct page___2 *); ++ int (*writepages)(struct address_space___2 *, struct writeback_control *); ++ int (*set_page_dirty)(struct page___2 *); ++ int (*readpages)(struct file___2 *, struct address_space___2 *, struct list_head *, unsigned int); ++ int (*write_begin)(struct file___2 *, struct address_space___2 *, loff_t, unsigned int, unsigned int, struct page___2 **, void **); ++ int (*write_end)(struct file___2 *, struct address_space___2 *, loff_t, unsigned int, unsigned int, struct page___2 *, void *); ++ sector_t (*bmap)(struct address_space___2 *, sector_t); ++ void (*invalidatepage)(struct page___2 *, unsigned int, unsigned int); ++ int (*releasepage)(struct page___2 *, gfp_t); ++ void (*freepage)(struct page___2 *); ++ ssize_t (*direct_IO)(struct kiocb___2 *, struct iov_iter___2 *); ++ int (*migratepage)(struct address_space___2 *, struct page___2 *, struct page___2 *, enum migrate_mode); ++ bool (*isolate_page)(struct page___2 *, isolate_mode_t); ++ void (*putback_page)(struct page___2 *); ++ int (*launder_page)(struct page___2 *); ++ int (*is_partially_uptodate)(struct page___2 *, long unsigned int, long unsigned int); ++ void (*is_dirty_writeback)(struct page___2 *, bool *, bool *); ++ int (*error_remove_page)(struct address_space___2 *, struct page___2 *); ++ int (*swap_activate)(struct swap_info_struct *, struct file___2 *, sector_t *); ++ void (*swap_deactivate)(struct file___2 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct iov_iter___2 { ++ int type; 
++ size_t iov_offset; ++ size_t count; ++ union { ++ const struct iovec *iov; ++ const struct kvec *kvec; ++ const struct bio_vec *bvec; ++ struct pipe_inode_info___2 *pipe; ++ }; ++ union { ++ long unsigned int nr_segs; ++ struct { ++ int idx; ++ int start_idx; ++ }; ++ }; ++}; ++ ++struct block_device___2 { ++ dev_t bd_dev; ++ int bd_openers; ++ int bd_write_openers; ++ struct inode___2 *bd_inode; ++ struct super_block___2 *bd_super; ++ struct mutex bd_mutex; ++ void *bd_claiming; ++ void *bd_holder; ++ int bd_holders; ++ bool bd_write_holder; ++ struct list_head bd_holder_disks; ++ struct block_device___2 *bd_contains; ++ unsigned int bd_block_size; ++ u8 bd_partno; ++ struct hd_struct *bd_part; ++ unsigned int bd_part_count; ++ int bd_invalidated; ++ struct gendisk *bd_disk; ++ struct request_queue *bd_queue; ++ struct backing_dev_info *bd_bdi; ++ struct list_head bd_list; ++ long unsigned int bd_private; ++ int bd_fsfreeze_count; ++ struct mutex bd_fsfreeze_mutex; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct inode_operations___2 { ++ struct dentry___2 * (*lookup)(struct inode___2 *, struct dentry___2 *, unsigned int); ++ const char * (*get_link)(struct dentry___2 *, struct inode___2 *, struct delayed_call *); ++ int (*permission)(struct inode___2 *, int); ++ struct posix_acl * (*get_acl)(struct inode___2 *, int); ++ int (*readlink)(struct dentry___2 *, char *, int); ++ int (*create)(struct inode___2 *, struct dentry___2 *, umode_t, bool); ++ int (*link)(struct dentry___2 *, struct inode___2 *, struct dentry___2 *); ++ int (*unlink)(struct inode___2 *, struct dentry___2 *); ++ int (*symlink)(struct inode___2 *, struct dentry___2 *, const char *); ++ int (*mkdir)(struct inode___2 *, struct dentry___2 *, umode_t); ++ int (*rmdir)(struct inode___2 *, struct dentry___2 *); ++ int (*mknod)(struct inode___2 *, struct dentry___2 *, umode_t, dev_t); ++ int (*rename)(struct inode___2 *, struct dentry___2 *, struct inode___2 *, struct dentry___2 *, unsigned int); ++ int (*setattr)(struct dentry___2 *, struct iattr___2 *); ++ int (*getattr)(const struct path___2 *, struct kstat *, u32, unsigned int); ++ ssize_t (*listxattr)(struct dentry___2 *, char *, size_t); ++ int (*fiemap)(struct inode___2 *, struct fiemap_extent_info *, u64, u64); ++ int (*update_time)(struct inode___2 *, struct timespec64 *, int); ++ int (*atomic_open)(struct inode___2 *, struct dentry___2 *, struct file___2 *, unsigned int, umode_t); ++ int (*tmpfile)(struct inode___2 *, struct dentry___2 *, umode_t); ++ int (*set_acl)(struct inode___2 *, struct posix_acl *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct file_lock_operations___2 { ++ void (*fl_copy_lock)(struct file_lock___2 *, struct file_lock___2 *); ++ void (*fl_release_private)(struct file_lock___2 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct lock_manager_operations___2; ++ ++struct file_lock___2 { ++ struct file_lock___2 *fl_next; ++ struct list_head fl_list; ++ struct hlist_node fl_link; ++ struct list_head fl_block; ++ fl_owner_t fl_owner; ++ unsigned int fl_flags; ++ unsigned char fl_type; ++ unsigned int fl_pid; ++ int fl_link_cpu; ++ wait_queue_head_t fl_wait; ++ struct file___2 
*fl_file; ++ loff_t fl_start; ++ loff_t fl_end; ++ struct fasync_struct___2 *fl_fasync; ++ long unsigned int fl_break_time; ++ long unsigned int fl_downgrade_time; ++ const struct file_lock_operations___2 *fl_ops; ++ const struct lock_manager_operations___2 *fl_lmops; ++ union { ++ struct nfs_lock_info nfs_fl; ++ struct nfs4_lock_info nfs4_fl; ++ struct { ++ struct list_head link; ++ int state; ++ } afs; ++ } fl_u; ++}; ++ ++struct lock_manager_operations___2 { ++ int (*lm_compare_owner)(struct file_lock___2 *, struct file_lock___2 *); ++ long unsigned int (*lm_owner_key)(struct file_lock___2 *); ++ fl_owner_t (*lm_get_owner)(fl_owner_t); ++ void (*lm_put_owner)(fl_owner_t); ++ void (*lm_notify)(struct file_lock___2 *); ++ int (*lm_grant)(struct file_lock___2 *, int); ++ bool (*lm_break)(struct file_lock___2 *); ++ int (*lm_change)(struct file_lock___2 *, int, struct list_head *); ++ void (*lm_setup)(struct file_lock___2 *, void **); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct fasync_struct___2 { ++ rwlock_t fa_lock; ++ int magic; ++ int fa_fd; ++ struct fasync_struct___2 *fa_next; ++ struct file___2 *fa_file; ++ struct callback_head fa_rcu; ++}; ++ ++struct file_system_type___2 { ++ const char *name; ++ int fs_flags; ++ struct dentry___2 * (*mount)(struct file_system_type___2 *, int, const char *, void *); ++ void (*kill_sb)(struct super_block___2 *); ++ struct module___2 *owner; ++ struct file_system_type___2 *next; ++ struct hlist_head fs_supers; ++ struct lock_class_key s_lock_key; ++ struct lock_class_key s_umount_key; ++ struct lock_class_key s_vfs_rename_key; ++ struct lock_class_key s_writers_key[3]; ++ struct lock_class_key i_lock_key; ++ struct lock_class_key i_mutex_key; ++ struct lock_class_key i_mutex_dir_key; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct super_operations___2 { ++ struct inode___2 * (*alloc_inode)(struct super_block___2 *); ++ void (*destroy_inode)(struct inode___2 *); ++ void (*dirty_inode)(struct inode___2 *, int); ++ int (*write_inode)(struct inode___2 *, struct writeback_control *); ++ int (*drop_inode)(struct inode___2 *); ++ void (*evict_inode)(struct inode___2 *); ++ void (*put_super)(struct super_block___2 *); ++ int (*sync_fs)(struct super_block___2 *, int); ++ int (*freeze_super)(struct super_block___2 *); ++ int (*freeze_fs)(struct super_block___2 *); ++ int (*thaw_super)(struct super_block___2 *); ++ int (*unfreeze_fs)(struct super_block___2 *); ++ int (*statfs)(struct dentry___2 *, struct kstatfs *); ++ int (*remount_fs)(struct super_block___2 *, int *, char *); ++ void (*umount_begin)(struct super_block___2 *); ++ int (*show_options)(struct seq_file___2 *, struct dentry___2 *); ++ int (*show_devname)(struct seq_file___2 *, struct dentry___2 *); ++ int (*show_path)(struct seq_file___2 *, struct dentry___2 *); ++ int (*show_stats)(struct seq_file___2 *, struct dentry___2 *); ++ ssize_t (*quota_read)(struct super_block___2 *, int, char *, size_t, loff_t); ++ ssize_t (*quota_write)(struct super_block___2 *, int, const char *, size_t, loff_t); ++ struct dquot___2 ** (*get_dquots)(struct inode___2 *); ++ int (*bdev_try_to_free_page)(struct super_block___2 *, struct page___2 *, gfp_t); ++ long int (*nr_cached_objects)(struct super_block___2 *, struct shrink_control *); ++ long int (*free_cached_objects)(struct super_block___2 *, struct shrink_control *); ++ long unsigned int kabi_reserved1; 
++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++typedef void (*poll_queue_proc___2)(struct file___2 *, wait_queue_head_t *, struct poll_table_struct___2 *); ++ ++struct poll_table_struct___2 { ++ poll_queue_proc___2 _qproc; ++ __poll_t _key; ++}; ++ ++struct seq_file___2 { ++ char *buf; ++ size_t size; ++ size_t from; ++ size_t count; ++ size_t pad_until; ++ loff_t index; ++ loff_t read_pos; ++ u64 version; ++ struct mutex lock; ++ const struct seq_operations___2 *op; ++ int poll_event; ++ const struct file___2 *file; ++ void *private; ++}; ++ ++struct dev_pm_info___2 { ++ pm_message_t power_state; ++ unsigned int can_wakeup: 1; ++ unsigned int async_suspend: 1; ++ bool in_dpm_list: 1; ++ bool is_prepared: 1; ++ bool is_suspended: 1; ++ bool is_noirq_suspended: 1; ++ bool is_late_suspended: 1; ++ bool early_init: 1; ++ bool direct_complete: 1; ++ u32 driver_flags; ++ spinlock_t lock; ++ struct list_head entry; ++ struct completion completion; ++ struct wakeup_source *wakeup; ++ bool wakeup_path: 1; ++ bool syscore: 1; ++ bool no_pm_callbacks: 1; ++ unsigned int must_resume: 1; ++ unsigned int may_skip_resume: 1; ++ struct timer_list suspend_timer; ++ long unsigned int timer_expires; ++ struct work_struct work; ++ wait_queue_head_t wait_queue; ++ struct wake_irq *wakeirq; ++ atomic_t usage_count; ++ atomic_t child_count; ++ unsigned int disable_depth: 3; ++ unsigned int idle_notification: 1; ++ unsigned int request_pending: 1; ++ unsigned int deferred_resume: 1; ++ unsigned int runtime_auto: 1; ++ bool ignore_children: 1; ++ unsigned int no_callbacks: 1; ++ unsigned int irq_safe: 1; ++ unsigned int use_autosuspend: 1; ++ unsigned int timer_autosuspends: 1; ++ unsigned int memalloc_noio: 1; ++ unsigned int links_count; ++ enum rpm_request request; ++ enum rpm_status runtime_status; ++ int runtime_error; ++ int autosuspend_delay; ++ long unsigned int last_busy; ++ long unsigned int active_jiffies; ++ long unsigned int suspended_jiffies; ++ long unsigned int accounting_timestamp; ++ struct pm_subsys_data *subsys_data; ++ void (*set_latency_tolerance)(struct device___2 *, s32); ++ struct dev_pm_qos *qos; ++}; ++ ++struct device_type___2; ++ ++struct bus_type___2; ++ ++struct device_driver___2; ++ ++struct dev_pm_domain___2; ++ ++struct dma_map_ops___2; ++ ++struct device_node___2; ++ ++struct fwnode_handle___2; ++ ++struct class___2; ++ ++struct attribute_group___2; ++ ++struct device___2 { ++ struct device___2 *parent; ++ struct device_private *p; ++ struct kobject___3 kobj; ++ const char *init_name; ++ const struct device_type___2 *type; ++ struct mutex mutex; ++ struct bus_type___2 *bus; ++ struct device_driver___2 *driver; ++ void *platform_data; ++ void *driver_data; ++ struct dev_links_info links; ++ struct dev_pm_info___2 power; ++ struct dev_pm_domain___2 *pm_domain; ++ struct irq_domain *msi_domain; ++ struct dev_pin_info *pins; ++ struct list_head msi_list; ++ int numa_node; ++ const struct dma_map_ops___2 *dma_ops; ++ u64 *dma_mask; ++ u64 coherent_dma_mask; ++ u64 bus_dma_mask; ++ long unsigned int dma_pfn_offset; ++ struct device_dma_parameters *dma_parms; ++ struct list_head dma_pools; ++ struct dma_coherent_mem *dma_mem; ++ struct cma *cma_area; ++ struct dev_archdata archdata; ++ struct device_node___2 *of_node; ++ struct fwnode_handle___2 *fwnode; ++ dev_t devt; ++ u32 id; ++ spinlock_t devres_lock; ++ struct list_head devres_head; ++ struct klist_node knode_class; ++ struct class___2 *class; ++ const 
struct attribute_group___2 **groups; ++ void (*release)(struct device___2 *); ++ struct iommu_group *iommu_group; ++ struct iommu_fwspec *iommu_fwspec; ++ struct iommu_param *iommu_param; ++ bool offline_disabled: 1; ++ bool offline: 1; ++ bool of_node_reused: 1; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++}; ++ ++struct kobj_attribute___3 { ++ struct attribute attr; ++ ssize_t (*show)(struct kobject___3 *, struct kobj_attribute___3 *, char *); ++ ssize_t (*store)(struct kobject___3 *, struct kobj_attribute___3 *, const char *, size_t); ++}; ++ ++typedef void compound_page_dtor___2(struct page___2 *); ++ ++struct kernfs_root___2; ++ ++struct kernfs_elem_dir___2 { ++ long unsigned int subdirs; ++ struct rb_root children; ++ struct kernfs_root___2 *root; ++}; ++ ++struct kernfs_syscall_ops___2; ++ ++struct kernfs_root___2 { ++ struct kernfs_node___2 *kn; ++ unsigned int flags; ++ struct idr ino_idr; ++ u32 last_ino; ++ u32 next_generation; ++ struct kernfs_syscall_ops___2 *syscall_ops; ++ struct list_head supers; ++ wait_queue_head_t deactivate_waitq; ++}; ++ ++struct kernfs_elem_symlink___2 { ++ struct kernfs_node___2 *target_kn; ++}; ++ ++struct kernfs_ops___2; ++ ++struct kernfs_elem_attr___2 { ++ const struct kernfs_ops___2 *ops; ++ struct kernfs_open_node *open; ++ loff_t size; ++ struct kernfs_node___2 *notify_next; ++}; ++ ++struct kernfs_node___2 { ++ atomic_t count; ++ atomic_t active; ++ struct kernfs_node___2 *parent; ++ const char *name; ++ struct rb_node rb; ++ const void *ns; ++ unsigned int hash; ++ union { ++ struct kernfs_elem_dir___2 dir; ++ struct kernfs_elem_symlink___2 symlink; ++ struct kernfs_elem_attr___2 attr; ++ }; ++ void *priv; ++ union kernfs_node_id id; ++ short unsigned int flags; ++ umode_t mode; ++ struct kernfs_iattrs *iattr; ++}; ++ ++struct kernfs_open_file___2; ++ ++struct kernfs_ops___2 { ++ int (*open)(struct kernfs_open_file___2 *); ++ void (*release)(struct kernfs_open_file___2 *); ++ int (*seq_show)(struct seq_file___2 *, void *); ++ void * (*seq_start)(struct seq_file___2 *, loff_t *); ++ void * (*seq_next)(struct seq_file___2 *, void *, loff_t *); ++ void (*seq_stop)(struct seq_file___2 *, void *); ++ ssize_t (*read)(struct kernfs_open_file___2 *, char *, size_t, loff_t); ++ size_t atomic_write_len; ++ bool prealloc; ++ ssize_t (*write)(struct kernfs_open_file___2 *, char *, size_t, loff_t); ++ int (*mmap)(struct kernfs_open_file___2 *, struct vm_area_struct___2 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct kernfs_syscall_ops___2 { ++ int (*remount_fs)(struct kernfs_root___2 *, int *, char *); ++ int (*show_options)(struct seq_file___2 *, struct kernfs_root___2 *); ++ int (*mkdir)(struct kernfs_node___2 *, const char *, umode_t); ++ int (*rmdir)(struct kernfs_node___2 *); ++ int (*rename)(struct kernfs_node___2 *, struct kernfs_node___2 *, const char *); ++ int (*show_path)(struct seq_file___2 *, struct kernfs_node___2 *, struct kernfs_root___2 *); ++ 
long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kernfs_open_file___2 { ++ struct kernfs_node___2 *kn; ++ struct file___2 *file; ++ struct seq_file___2 *seq_file; ++ void *priv; ++ struct mutex mutex; ++ struct mutex prealloc_mutex; ++ int event; ++ struct list_head list; ++ char *prealloc_buf; ++ size_t atomic_write_len; ++ bool mmapped: 1; ++ bool released: 1; ++ const struct vm_operations_struct___2 *vm_ops; ++}; ++ ++struct bin_attribute___2; ++ ++struct attribute_group___2 { ++ const char *name; ++ umode_t (*is_visible)(struct kobject___3 *, struct attribute *, int); ++ umode_t (*is_bin_visible)(struct kobject___3 *, struct bin_attribute___2 *, int); ++ struct attribute **attrs; ++ struct bin_attribute___2 **bin_attrs; ++}; ++ ++struct bin_attribute___2 { ++ struct attribute attr; ++ size_t size; ++ void *private; ++ ssize_t (*read)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, char *, loff_t, size_t); ++ ssize_t (*write)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, char *, loff_t, size_t); ++ int (*mmap)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, struct vm_area_struct___2 *); ++}; ++ ++struct sysfs_ops___2 { ++ ssize_t (*show)(struct kobject___3 *, struct attribute *, char *); ++ ssize_t (*store)(struct kobject___3 *, struct attribute *, const char *, size_t); ++}; ++ ++struct kset_uevent_ops___2; ++ ++struct kset___2 { ++ struct list_head list; ++ spinlock_t list_lock; ++ struct kobject___3 kobj; ++ const struct kset_uevent_ops___2 *uevent_ops; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kobj_type___2 { ++ void (*release)(struct kobject___3 *); ++ const struct sysfs_ops___2 *sysfs_ops; ++ struct attribute **default_attrs; ++ const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject___3 *); ++ const void * (*namespace)(struct kobject___3 *); ++ void (*get_ownership)(struct kobject___3 *, kuid_t *, kgid_t *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct kset_uevent_ops___2 { ++ int (* const filter)(struct kset___2 *, struct kobject___3 *); ++ const char * (* const name)(struct kset___2 *, struct kobject___3 *); ++ int (* const uevent)(struct kset___2 *, struct kobject___3 *, struct kobj_uevent_env *); ++}; ++ ++struct dev_pm_ops___2 { ++ int (*prepare)(struct device___2 *); ++ void (*complete)(struct device___2 *); ++ int (*suspend)(struct device___2 *); ++ int (*resume)(struct device___2 *); ++ int (*freeze)(struct device___2 *); ++ int (*thaw)(struct device___2 *); ++ int (*poweroff)(struct device___2 *); ++ int (*restore)(struct device___2 *); ++ int (*suspend_late)(struct device___2 *); ++ int (*resume_early)(struct device___2 *); ++ int (*freeze_late)(struct device___2 *); ++ int (*thaw_early)(struct device___2 *); ++ int (*poweroff_late)(struct device___2 *); ++ int (*restore_early)(struct device___2 *); ++ int (*suspend_noirq)(struct device___2 *); ++ int (*resume_noirq)(struct device___2 *); ++ int (*freeze_noirq)(struct device___2 *); ++ int (*thaw_noirq)(struct device___2 *); ++ int (*poweroff_noirq)(struct device___2 *); ++ int (*restore_noirq)(struct device___2 *); ++ int (*runtime_suspend)(struct device___2 *); ++ int 
(*runtime_resume)(struct device___2 *); ++ int (*runtime_idle)(struct device___2 *); ++}; ++ ++struct dev_pm_domain___2 { ++ struct dev_pm_ops___2 ops; ++ void (*detach)(struct device___2 *, bool); ++ int (*activate)(struct device___2 *); ++ void (*sync)(struct device___2 *); ++ void (*dismiss)(struct device___2 *); ++}; ++ ++struct bus_type___2 { ++ const char *name; ++ const char *dev_name; ++ struct device___2 *dev_root; ++ const struct attribute_group___2 **bus_groups; ++ const struct attribute_group___2 **dev_groups; ++ const struct attribute_group___2 **drv_groups; ++ int (*match)(struct device___2 *, struct device_driver___2 *); ++ int (*uevent)(struct device___2 *, struct kobj_uevent_env *); ++ int (*probe)(struct device___2 *); ++ int (*remove)(struct device___2 *); ++ void (*shutdown)(struct device___2 *); ++ int (*online)(struct device___2 *); ++ int (*offline)(struct device___2 *); ++ int (*suspend)(struct device___2 *, pm_message_t); ++ int (*resume)(struct device___2 *); ++ int (*num_vf)(struct device___2 *); ++ int (*dma_configure)(struct device___2 *); ++ const struct dev_pm_ops___2 *pm; ++ const struct iommu_ops *iommu_ops; ++ struct subsys_private *p; ++ struct lock_class_key lock_key; ++ bool need_parent_lock; ++}; ++ ++struct device_driver___2 { ++ const char *name; ++ struct bus_type___2 *bus; ++ struct module___2 *owner; ++ const char *mod_name; ++ bool suppress_bind_attrs; ++ enum probe_type probe_type; ++ const struct of_device_id *of_match_table; ++ const struct acpi_device_id *acpi_match_table; ++ int (*probe)(struct device___2 *); ++ int (*remove)(struct device___2 *); ++ void (*shutdown)(struct device___2 *); ++ int (*suspend)(struct device___2 *, pm_message_t); ++ int (*resume)(struct device___2 *); ++ const struct attribute_group___2 **groups; ++ const struct dev_pm_ops___2 *pm; ++ void (*coredump)(struct device___2 *); ++ struct driver_private *p; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct device_type___2 { ++ const char *name; ++ const struct attribute_group___2 **groups; ++ int (*uevent)(struct device___2 *, struct kobj_uevent_env *); ++ char * (*devnode)(struct device___2 *, umode_t *, kuid_t *, kgid_t *); ++ void (*release)(struct device___2 *); ++ const struct dev_pm_ops___2 *pm; ++}; ++ ++struct class___2 { ++ const char *name; ++ struct module___2 *owner; ++ const struct attribute_group___2 **class_groups; ++ const struct attribute_group___2 **dev_groups; ++ struct kobject___3 *dev_kobj; ++ int (*dev_uevent)(struct device___2 *, struct kobj_uevent_env *); ++ char * (*devnode)(struct device___2 *, umode_t *); ++ void (*class_release)(struct class___2 *); ++ void (*dev_release)(struct device___2 *); ++ int (*shutdown_pre)(struct device___2 *); ++ const struct kobj_ns_type_operations *ns_type; ++ const void * (*namespace)(struct device___2 *); ++ void (*get_ownership)(struct device___2 *, kuid_t *, kgid_t *); ++ const struct dev_pm_ops___2 *pm; ++ struct subsys_private *p; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct device_attribute___2 { ++ struct attribute attr; ++ ssize_t (*show)(struct device___2 *, struct device_attribute___2 *, char *); ++ ssize_t (*store)(struct device___2 *, struct device_attribute___2 *, const char *, size_t); ++}; ++ ++struct dma_map_ops___2 { ++ void * (*alloc)(struct device___2 *, size_t, 
dma_addr_t *, gfp_t, long unsigned int); ++ void (*free)(struct device___2 *, size_t, void *, dma_addr_t, long unsigned int); ++ int (*mmap)(struct device___2 *, struct vm_area_struct___2 *, void *, dma_addr_t, size_t, long unsigned int); ++ int (*get_sgtable)(struct device___2 *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); ++ dma_addr_t (*map_page)(struct device___2 *, struct page___2 *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); ++ void (*unmap_page)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ int (*map_sg)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); ++ void (*unmap_sg)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); ++ dma_addr_t (*map_resource)(struct device___2 *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ void (*unmap_resource)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); ++ void (*sync_single_for_cpu)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction); ++ void (*sync_single_for_device)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction); ++ void (*sync_sg_for_cpu)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction); ++ void (*sync_sg_for_device)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction); ++ void (*cache_sync)(struct device___2 *, void *, size_t, enum dma_data_direction); ++ int (*mapping_error)(struct device___2 *, dma_addr_t); ++ int (*dma_supported)(struct device___2 *, u64); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++}; ++ ++struct fwnode_operations___2; ++ ++struct fwnode_handle___2 { ++ struct fwnode_handle___2 *secondary; ++ const struct fwnode_operations___2 *ops; ++}; ++ ++struct property___2; ++ ++struct device_node___2 { ++ const char *name; ++ const char *type; ++ phandle phandle; ++ const char *full_name; ++ struct fwnode_handle___2 fwnode; ++ struct property___2 *properties; ++ struct property___2 *deadprops; ++ struct device_node___2 *parent; ++ struct device_node___2 *child; ++ struct device_node___2 *sibling; ++ struct kobject___3 kobj; ++ long unsigned int _flags; ++ void *data; ++}; ++ ++struct node___3 { ++ struct device___2 dev; ++ struct work_struct node_work; ++}; ++ ++struct fd___2 { ++ struct file___2 *file; ++ unsigned int flags; ++}; ++ ++typedef struct poll_table_struct___2 poll_table___2; ++ ++struct fwnode_reference_args___2; ++ ++struct fwnode_endpoint___2; ++ ++struct fwnode_operations___2 { ++ struct fwnode_handle___2 * (*get)(struct fwnode_handle___2 *); ++ void (*put)(struct fwnode_handle___2 *); ++ bool (*device_is_available)(const struct fwnode_handle___2 *); ++ const void * (*device_get_match_data)(const struct fwnode_handle___2 *, const struct device___2 *); ++ bool (*property_present)(const struct fwnode_handle___2 *, const char *); ++ int (*property_read_int_array)(const struct fwnode_handle___2 *, const char *, unsigned int, void *, size_t); ++ int (*property_read_string_array)(const struct fwnode_handle___2 *, const char *, const char **, size_t); ++ struct fwnode_handle___2 * (*get_parent)(const struct fwnode_handle___2 *); ++ struct fwnode_handle___2 * (*get_next_child_node)(const struct fwnode_handle___2 *, struct 
fwnode_handle___2 *); ++ struct fwnode_handle___2 * (*get_named_child_node)(const struct fwnode_handle___2 *, const char *); ++ int (*get_reference_args)(const struct fwnode_handle___2 *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args___2 *); ++ struct fwnode_handle___2 * (*graph_get_next_endpoint)(const struct fwnode_handle___2 *, struct fwnode_handle___2 *); ++ struct fwnode_handle___2 * (*graph_get_remote_endpoint)(const struct fwnode_handle___2 *); ++ struct fwnode_handle___2 * (*graph_get_port_parent)(struct fwnode_handle___2 *); ++ int (*graph_parse_endpoint)(const struct fwnode_handle___2 *, struct fwnode_endpoint___2 *); ++}; ++ ++struct fwnode_endpoint___2 { ++ unsigned int port; ++ unsigned int id; ++ const struct fwnode_handle___2 *local_fwnode; ++}; ++ ++struct fwnode_reference_args___2 { ++ struct fwnode_handle___2 *fwnode; ++ unsigned int nargs; ++ u64 args[8]; ++}; ++ ++struct property___2 { ++ char *name; ++ int length; ++ void *value; ++ struct property___2 *next; ++ long unsigned int _flags; ++ struct bin_attribute___2 attr; ++}; ++ ++typedef int (*key_restrict_link_func_t___2)(struct key___2 *, const struct key_type *, const union key_payload *, struct key___2 *); ++ ++struct key_restriction___2; ++ ++struct key___2 { ++ refcount_t usage; ++ key_serial_t serial; ++ union { ++ struct list_head graveyard_link; ++ struct rb_node serial_node; ++ }; ++ struct rw_semaphore___3 sem; ++ struct key_user *user; ++ void *security; ++ union { ++ time64_t expiry; ++ time64_t revoked_at; ++ }; ++ time64_t last_used_at; ++ kuid_t uid; ++ kgid_t gid; ++ key_perm_t perm; ++ short unsigned int quotalen; ++ short unsigned int datalen; ++ short int state; ++ long unsigned int flags; ++ union { ++ struct keyring_index_key index_key; ++ struct { ++ struct key_type *type; ++ char *description; ++ }; ++ }; ++ union { ++ union key_payload payload; ++ struct { ++ struct list_head name_link; ++ struct assoc_array keys; ++ }; ++ }; ++ struct key_restriction___2 *restrict_link; ++}; ++ ++struct key_restriction___2 { ++ key_restrict_link_func_t___2 check; ++ struct key___2 *key; ++ struct key_type *keytype; ++}; ++ ++struct inet_frags___2; ++ ++struct netns_frags___2 { ++ long int high_thresh; ++ long int low_thresh; ++ int timeout; ++ int max_dist; ++ struct inet_frags___2 *f; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct rhashtable rhashtable; ++ long: 64; ++ long: 64; ++ long: 64; ++ atomic_long_t mem; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct netns_ipv4___2 { ++ struct ctl_table_header *forw_hdr; ++ struct ctl_table_header *frags_hdr; ++ struct ctl_table_header *ipv4_hdr; ++ struct ctl_table_header *route_hdr; ++ struct ctl_table_header *xfrm4_hdr; ++ struct ipv4_devconf *devconf_all; ++ struct ipv4_devconf *devconf_dflt; ++ struct ip_ra_chain *ra_chain; ++ struct mutex ra_mutex; ++ struct fib_rules_ops *rules_ops; ++ bool fib_has_custom_rules; ++ unsigned int fib_rules_require_fldissect; ++ struct fib_table *fib_main; ++ struct fib_table *fib_default; ++ bool fib_has_custom_local_routes; ++ int fib_num_tclassid_users; ++ struct hlist_head *fib_table_hash; ++ bool fib_offload_disabled; ++ struct sock *fibnl; ++ struct sock **icmp_sk; ++ struct sock *mc_autojoin_sk; ++ struct inet_peer_base *peers; ++ struct sock **tcp_sk; ++ struct netns_frags___2 frags; ++ struct xt_table *iptable_filter; ++ struct xt_table *iptable_mangle; ++ struct xt_table *iptable_raw; ++ struct xt_table 
*arptable_filter; ++ struct xt_table *iptable_security; ++ struct xt_table *nat_table; ++ int sysctl_icmp_echo_ignore_all; ++ int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_ignore_bogus_error_responses; ++ int sysctl_icmp_ratelimit; ++ int sysctl_icmp_ratemask; ++ int sysctl_icmp_errors_use_inbound_ifaddr; ++ struct local_ports ip_local_ports; ++ int sysctl_tcp_ecn; ++ int sysctl_tcp_ecn_fallback; ++ int sysctl_ip_default_ttl; ++ int sysctl_ip_no_pmtu_disc; ++ int sysctl_ip_fwd_use_pmtu; ++ int sysctl_ip_fwd_update_priority; ++ int sysctl_ip_nonlocal_bind; ++ int sysctl_ip_dynaddr; ++ int sysctl_ip_early_demux; ++ int sysctl_raw_l3mdev_accept; ++ int sysctl_tcp_early_demux; ++ int sysctl_udp_early_demux; ++ int sysctl_fwmark_reflect; ++ int sysctl_tcp_fwmark_accept; ++ int sysctl_tcp_l3mdev_accept; ++ int sysctl_tcp_mtu_probing; ++ int sysctl_tcp_base_mss; ++ int sysctl_tcp_min_snd_mss; ++ int sysctl_tcp_probe_threshold; ++ u32 sysctl_tcp_probe_interval; ++ int sysctl_tcp_keepalive_time; ++ int sysctl_tcp_keepalive_probes; ++ int sysctl_tcp_keepalive_intvl; ++ int sysctl_tcp_syn_retries; ++ int sysctl_tcp_synack_retries; ++ int sysctl_tcp_syncookies; ++ int sysctl_tcp_reordering; ++ int sysctl_tcp_retries1; ++ int sysctl_tcp_retries2; ++ int sysctl_tcp_orphan_retries; ++ int sysctl_tcp_fin_timeout; ++ unsigned int sysctl_tcp_notsent_lowat; ++ int sysctl_tcp_tw_reuse; ++ int sysctl_tcp_sack; ++ int sysctl_tcp_window_scaling; ++ int sysctl_tcp_timestamps; ++ int sysctl_tcp_early_retrans; ++ int sysctl_tcp_recovery; ++ int sysctl_tcp_thin_linear_timeouts; ++ int sysctl_tcp_slow_start_after_idle; ++ int sysctl_tcp_retrans_collapse; ++ int sysctl_tcp_stdurg; ++ int sysctl_tcp_rfc1337; ++ int sysctl_tcp_abort_on_overflow; ++ int sysctl_tcp_fack; ++ int sysctl_tcp_max_reordering; ++ int sysctl_tcp_dsack; ++ int sysctl_tcp_app_win; ++ int sysctl_tcp_adv_win_scale; ++ int sysctl_tcp_frto; ++ int sysctl_tcp_nometrics_save; ++ int sysctl_tcp_moderate_rcvbuf; ++ int sysctl_tcp_tso_win_divisor; ++ int sysctl_tcp_workaround_signed_windows; ++ int sysctl_tcp_limit_output_bytes; ++ int sysctl_tcp_challenge_ack_limit; ++ int sysctl_tcp_min_tso_segs; ++ int sysctl_tcp_min_rtt_wlen; ++ int sysctl_tcp_autocorking; ++ int sysctl_tcp_invalid_ratelimit; ++ int sysctl_tcp_pacing_ss_ratio; ++ int sysctl_tcp_pacing_ca_ratio; ++ int sysctl_tcp_wmem[3]; ++ int sysctl_tcp_rmem[3]; ++ int sysctl_tcp_comp_sack_nr; ++ long unsigned int sysctl_tcp_comp_sack_delay_ns; ++ long: 64; ++ struct inet_timewait_death_row tcp_death_row; ++ int sysctl_max_syn_backlog; ++ int sysctl_tcp_fastopen; ++ const struct tcp_congestion_ops *tcp_congestion_control; ++ struct tcp_fastopen_context *tcp_fastopen_ctx; ++ spinlock_t tcp_fastopen_ctx_lock; ++ unsigned int sysctl_tcp_fastopen_blackhole_timeout; ++ atomic_t tfo_active_disable_times; ++ long unsigned int tfo_active_disable_stamp; ++ int sysctl_udp_wmem_min; ++ int sysctl_udp_rmem_min; ++ int sysctl_udp_l3mdev_accept; ++ int sysctl_igmp_max_memberships; ++ int sysctl_igmp_max_msf; ++ int sysctl_igmp_llm_reports; ++ int sysctl_igmp_qrv; ++ struct ping_group_range ping_group_range; ++ atomic_t dev_addr_genid; ++ long unsigned int *sysctl_local_reserved_ports; ++ int sysctl_ip_prot_sock; ++ struct list_head mr_tables; ++ struct fib_rules_ops *mr_rules_ops; ++ int sysctl_fib_multipath_use_neigh; ++ int sysctl_fib_multipath_hash_policy; ++ struct fib_notifier_ops *notifier_ops; ++ unsigned int fib_seq; ++ struct fib_notifier_ops *ipmr_notifier_ops; ++ unsigned int ipmr_seq; ++ 
atomic_t rt_genid; ++ siphash_key_t ip_id_key; ++}; ++ ++struct net_device___2; ++ ++struct dst_ops___2 { ++ short unsigned int family; ++ unsigned int gc_thresh; ++ int (*gc)(struct dst_ops___2 *); ++ struct dst_entry * (*check)(struct dst_entry *, __u32); ++ unsigned int (*default_advmss)(const struct dst_entry *); ++ unsigned int (*mtu)(const struct dst_entry *); ++ u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); ++ void (*destroy)(struct dst_entry *); ++ void (*ifdown)(struct dst_entry *, struct net_device___2 *, int); ++ struct dst_entry * (*negative_advice)(struct dst_entry *); ++ void (*link_failure)(struct sk_buff___2 *); ++ void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff___2 *, u32, bool); ++ void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff___2 *); ++ int (*local_out)(struct net___2 *, struct sock *, struct sk_buff___2 *); ++ struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff___2 *, const void *); ++ void (*confirm_neigh)(const struct dst_entry *, const void *); ++ struct kmem_cache *kmem_cachep; ++ struct percpu_counter pcpuc_entries; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct netns_ipv6___2 { ++ struct netns_sysctl_ipv6 sysctl; ++ struct ipv6_devconf *devconf_all; ++ struct ipv6_devconf *devconf_dflt; ++ struct inet_peer_base *peers; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_frags___2 frags; ++ struct xt_table *ip6table_filter; ++ struct xt_table *ip6table_mangle; ++ struct xt_table *ip6table_raw; ++ struct xt_table *ip6table_security; ++ struct xt_table *ip6table_nat; ++ struct fib6_info *fib6_null_entry; ++ struct rt6_info *ip6_null_entry; ++ struct rt6_statistics *rt6_stats; ++ struct timer_list ip6_fib_timer; ++ struct hlist_head *fib_table_hash; ++ struct fib6_table *fib6_main_tbl; ++ struct list_head fib6_walkers; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dst_ops___2 ip6_dst_ops; ++ rwlock_t fib6_walker_lock; ++ spinlock_t fib6_gc_lock; ++ unsigned int ip6_rt_gc_expire; ++ long unsigned int ip6_rt_last_gc; ++ unsigned int fib6_rules_require_fldissect; ++ bool fib6_has_custom_rules; ++ struct rt6_info *ip6_prohibit_entry; ++ struct rt6_info *ip6_blk_hole_entry; ++ struct fib6_table *fib6_local_tbl; ++ struct fib_rules_ops *fib6_rules_ops; ++ struct sock **icmp_sk; ++ struct sock *ndisc_sk; ++ struct sock *tcp_sk; ++ struct sock *igmp_sk; ++ struct sock *mc_autojoin_sk; ++ struct list_head mr6_tables; ++ struct fib_rules_ops *mr6_rules_ops; ++ atomic_t dev_addr_genid; ++ atomic_t fib6_sernum; ++ struct seg6_pernet_data *seg6_data; ++ struct fib_notifier_ops *notifier_ops; ++ struct fib_notifier_ops *ip6mr_notifier_ops; ++ unsigned int ipmr_seq; ++ struct { ++ struct hlist_head head; ++ spinlock_t lock; ++ u32 seq; ++ } ip6addrlbl_table; ++ long: 64; ++}; ++ ++struct netns_nf_frag___2 { ++ struct netns_frags___2 frags; ++}; ++ ++struct sk_buff_head___2 { ++ struct sk_buff___2 *next; ++ struct sk_buff___2 *prev; ++ __u32 qlen; ++ spinlock_t lock; ++}; ++ ++struct netns_xfrm___2 { ++ struct list_head state_all; ++ struct hlist_head *state_bydst; ++ struct hlist_head *state_bysrc; ++ struct hlist_head *state_byspi; ++ unsigned int state_hmask; ++ unsigned int state_num; ++ struct 
work_struct state_hash_work; ++ struct list_head policy_all; ++ struct hlist_head *policy_byidx; ++ unsigned int policy_idx_hmask; ++ struct hlist_head policy_inexact[3]; ++ struct xfrm_policy_hash policy_bydst[3]; ++ unsigned int policy_count[6]; ++ struct work_struct policy_hash_work; ++ struct xfrm_policy_hthresh policy_hthresh; ++ struct sock *nlsk; ++ struct sock *nlsk_stash; ++ u32 sysctl_aevent_etime; ++ u32 sysctl_aevent_rseqth; ++ int sysctl_larval_drop; ++ u32 sysctl_acq_expires; ++ struct ctl_table_header *sysctl_hdr; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dst_ops___2 xfrm4_dst_ops; ++ struct dst_ops___2 xfrm6_dst_ops; ++ spinlock_t xfrm_state_lock; ++ spinlock_t xfrm_policy_lock; ++ struct mutex xfrm_cfg_mutex; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct net___2 { ++ refcount_t passive; ++ refcount_t count; ++ spinlock_t rules_mod_lock; ++ u32 hash_mix; ++ atomic64_t cookie_gen; ++ struct list_head list; ++ struct list_head exit_list; ++ struct llist_node cleanup_list; ++ struct user_namespace___2 *user_ns; ++ struct ucounts___2 *ucounts; ++ spinlock_t nsid_lock; ++ struct idr netns_ids; ++ struct ns_common___2 ns; ++ struct proc_dir_entry *proc_net; ++ struct proc_dir_entry *proc_net_stat; ++ struct ctl_table_set sysctls; ++ struct sock *rtnl; ++ struct sock *genl_sock; ++ struct uevent_sock *uevent_sock; ++ struct list_head dev_base_head; ++ struct hlist_head *dev_name_head; ++ struct hlist_head *dev_index_head; ++ unsigned int dev_base_seq; ++ int ifindex; ++ unsigned int dev_unreg_count; ++ struct list_head rules_ops; ++ struct list_head fib_notifier_ops; ++ struct net_device___2 *loopback_dev; ++ struct netns_core core; ++ struct netns_mib mib; ++ struct netns_packet packet; ++ struct netns_unix unx; ++ long: 64; ++ long: 64; ++ struct netns_ipv4___2 ipv4; ++ struct netns_ipv6___2 ipv6; ++ struct netns_sctp sctp; ++ struct netns_nf nf; ++ struct netns_xt xt; ++ struct netns_ct ct; ++ struct netns_nftables nft; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_nf_frag___2 nf_frag; ++ struct ctl_table_header *nf_frag_frags_hdr; ++ struct sock *nfnl; ++ struct sock *nfnl_stash; ++ struct list_head nfnl_acct_list; ++ struct list_head nfct_timeout_list; ++ struct sk_buff_head___2 wext_nlevents; ++ struct net_generic *gen; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netns_xfrm___2 xfrm; ++ struct netns_ipvs *ipvs; ++ struct netns_mpls mpls; ++ struct netns_can can; ++ struct sock *diag_nlsk; ++ atomic_t fnhe_genid; ++}; ++ ++struct cgroup_namespace___2 { ++ refcount_t count; ++ struct ns_common___2 ns; ++ struct user_namespace___2 *user_ns; ++ struct ucounts___2 *ucounts; ++ struct css_set___2 *root_cset; ++}; ++ ++struct proc_ns_operations___2 { ++ const char *name; ++ const char *real_ns_name; ++ int type; ++ struct ns_common___2 * (*get)(struct task_struct___2 *); ++ void (*put)(struct ns_common___2 *); ++ int (*install)(struct nsproxy___2 *, struct ns_common___2 *); ++ struct user_namespace___2 * (*owner)(struct ns_common___2 *); ++ struct ns_common___2 * (*get_parent)(struct ns_common___2 *); ++}; ++ ++struct ucounts___2 { ++ struct hlist_node node; ++ struct user_namespace___2 *ns; ++ kuid_t uid; ++ int count; ++ atomic_t ucount[9]; ++}; ++ ++enum perf_branch_sample_type { ++ PERF_SAMPLE_BRANCH_USER = 1, ++ PERF_SAMPLE_BRANCH_KERNEL = 2, ++ PERF_SAMPLE_BRANCH_HV = 4, ++ PERF_SAMPLE_BRANCH_ANY = 8, ++ PERF_SAMPLE_BRANCH_ANY_CALL = 16, ++ PERF_SAMPLE_BRANCH_ANY_RETURN = 32, ++ 
PERF_SAMPLE_BRANCH_IND_CALL = 64, ++ PERF_SAMPLE_BRANCH_ABORT_TX = 128, ++ PERF_SAMPLE_BRANCH_IN_TX = 256, ++ PERF_SAMPLE_BRANCH_NO_TX = 512, ++ PERF_SAMPLE_BRANCH_COND = 1024, ++ PERF_SAMPLE_BRANCH_CALL_STACK = 2048, ++ PERF_SAMPLE_BRANCH_IND_JUMP = 4096, ++ PERF_SAMPLE_BRANCH_CALL = 8192, ++ PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, ++ PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, ++ PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, ++ PERF_SAMPLE_BRANCH_MAX = 131072, ++}; ++ ++enum perf_event_read_format { ++ PERF_FORMAT_TOTAL_TIME_ENABLED = 1, ++ PERF_FORMAT_TOTAL_TIME_RUNNING = 2, ++ PERF_FORMAT_ID = 4, ++ PERF_FORMAT_GROUP = 8, ++ PERF_FORMAT_MAX = 16, ++}; ++ ++enum perf_event_ioc_flags { ++ PERF_IOC_FLAG_GROUP = 1, ++}; ++ ++struct perf_event_header { ++ __u32 type; ++ __u16 misc; ++ __u16 size; ++}; ++ ++struct perf_ns_link_info { ++ __u64 dev; ++ __u64 ino; ++}; ++ ++enum { ++ NET_NS_INDEX = 0, ++ UTS_NS_INDEX = 1, ++ IPC_NS_INDEX = 2, ++ PID_NS_INDEX = 3, ++ USER_NS_INDEX = 4, ++ MNT_NS_INDEX = 5, ++ CGROUP_NS_INDEX = 6, ++ NR_NAMESPACES = 7, ++}; ++ ++enum perf_event_type { ++ PERF_RECORD_MMAP = 1, ++ PERF_RECORD_LOST = 2, ++ PERF_RECORD_COMM = 3, ++ PERF_RECORD_EXIT = 4, ++ PERF_RECORD_THROTTLE = 5, ++ PERF_RECORD_UNTHROTTLE = 6, ++ PERF_RECORD_FORK = 7, ++ PERF_RECORD_READ = 8, ++ PERF_RECORD_SAMPLE = 9, ++ PERF_RECORD_MMAP2 = 10, ++ PERF_RECORD_AUX = 11, ++ PERF_RECORD_ITRACE_START = 12, ++ PERF_RECORD_LOST_SAMPLES = 13, ++ PERF_RECORD_SWITCH = 14, ++ PERF_RECORD_SWITCH_CPU_WIDE = 15, ++ PERF_RECORD_NAMESPACES = 16, ++ PERF_RECORD_MAX = 17, ++}; ++ ++struct kernel_param_ops___2 { ++ unsigned int flags; ++ int (*set)(const char *, const struct kernel_param___2 *); ++ int (*get)(char *, const struct kernel_param___2 *); ++ void (*free)(void *); ++}; ++ ++struct kparam_array___2; ++ ++struct kernel_param___2 { ++ const char *name; ++ struct module___2 *mod; ++ const struct kernel_param_ops___2 *ops; ++ const u16 perm; ++ s8 level; ++ u8 flags; ++ union { ++ void *arg; ++ const struct kparam_string *str; ++ const struct kparam_array___2 *arr; ++ }; ++}; ++ ++struct kparam_array___2 { ++ unsigned int max; ++ unsigned int elemsize; ++ unsigned int *num; ++ const struct kernel_param_ops___2 *ops; ++ void *elem; ++}; ++ ++struct module_attribute___2 { ++ struct attribute attr; ++ ssize_t (*show)(struct module_attribute___2 *, struct module_kobject___2 *, char *); ++ ssize_t (*store)(struct module_attribute___2 *, struct module_kobject___2 *, const char *, size_t); ++ void (*setup)(struct module___2 *, const char *); ++ int (*test)(struct module___2 *); ++ void (*free)(struct module___2 *); ++}; ++ ++struct trace_event_class___2; ++ ++struct bpf_prog_array___2; ++ ++struct trace_event_call___2 { ++ struct list_head list; ++ struct trace_event_class___2 *class; ++ union { ++ char *name; ++ struct tracepoint *tp; ++ }; ++ struct trace_event event; ++ char *print_fmt; ++ struct event_filter *filter; ++ void *mod; ++ void *data; ++ int flags; ++ int perf_refcount; ++ struct hlist_head *perf_events; ++ struct bpf_prog_array___2 *prog_array; ++ int (*perf_perm)(struct trace_event_call___2 *, struct perf_event___2 *); ++}; ++ ++struct bpf_prog_aux___2; ++ ++struct bpf_prog___2 { ++ u16 pages; ++ u16 jited: 1; ++ u16 jit_requested: 1; ++ u16 undo_set_mem: 1; ++ u16 gpl_compatible: 1; ++ u16 cb_access: 1; ++ u16 dst_needed: 1; ++ u16 blinded: 1; ++ u16 is_func: 1; ++ u16 kprobe_override: 1; ++ u16 has_callchain_buf: 1; ++ enum bpf_prog_type type; ++ enum bpf_attach_type expected_attach_type; ++ u32 len; ++ u32 
jited_len; ++ u8 tag[8]; ++ struct bpf_prog_aux___2 *aux; ++ struct sock_fprog_kern *orig_prog; ++ unsigned int (*bpf_func)(const void *, const struct bpf_insn *); ++ union { ++ struct sock_filter insns[0]; ++ struct bpf_insn insnsi[0]; ++ }; ++}; ++ ++struct cgroup_bpf___2 { ++ struct bpf_prog_array___2 *effective[21]; ++ struct list_head progs[21]; ++ u32 flags[21]; ++ struct bpf_prog_array___2 *inactive; ++}; ++ ++struct bpf_prog_array_item___2 { ++ struct bpf_prog___2 *prog; ++ struct bpf_cgroup_storage *cgroup_storage; ++}; ++ ++struct bpf_prog_array___2 { ++ struct callback_head rcu; ++ struct bpf_prog_array_item___2 items[0]; ++}; ++ ++struct cgroup_file___2 { ++ struct kernfs_node___2 *kn; ++ long unsigned int notified_at; ++ struct timer_list notify_timer; ++}; ++ ++struct cgroup_subsys___2; ++ ++struct cgroup_subsys_state___2 { ++ struct cgroup___2 *cgroup; ++ struct cgroup_subsys___2 *ss; ++ struct percpu_ref refcnt; ++ struct list_head sibling; ++ struct list_head children; ++ struct list_head rstat_css_node; ++ int id; ++ unsigned int flags; ++ u64 serial_nr; ++ atomic_t online_cnt; ++ struct work_struct destroy_work; ++ struct rcu_work destroy_rwork; ++ struct cgroup_subsys_state___2 *parent; ++}; ++ ++struct cgroup_root___2; ++ ++struct cgroup_rstat_cpu___2; ++ ++struct cgroup___2 { ++ struct cgroup_subsys_state___2 self; ++ long unsigned int flags; ++ int id; ++ int level; ++ int max_depth; ++ int nr_descendants; ++ int nr_dying_descendants; ++ int max_descendants; ++ int nr_populated_csets; ++ int nr_populated_domain_children; ++ int nr_populated_threaded_children; ++ int nr_threaded_children; ++ struct kernfs_node___2 *kn; ++ struct cgroup_file___2 procs_file; ++ struct cgroup_file___2 events_file; ++ u16 subtree_control; ++ u16 subtree_ss_mask; ++ u16 old_subtree_control; ++ u16 old_subtree_ss_mask; ++ struct cgroup_subsys_state___2 *subsys[14]; ++ struct cgroup_root___2 *root; ++ struct list_head cset_links; ++ struct list_head e_csets[14]; ++ struct cgroup___2 *dom_cgrp; ++ struct cgroup___2 *old_dom_cgrp; ++ struct cgroup_rstat_cpu___2 *rstat_cpu; ++ struct list_head rstat_css_list; ++ struct cgroup_base_stat pending_bstat; ++ struct cgroup_base_stat bstat; ++ struct prev_cputime prev_cputime; ++ struct list_head pidlists; ++ struct mutex pidlist_mutex; ++ wait_queue_head_t offline_waitq; ++ struct work_struct release_agent_work; ++ struct cgroup_bpf___2 bpf; ++ atomic_t congestion_count; ++ int ancestor_ids[0]; ++}; ++ ++struct cftype___2; ++ ++struct cgroup_subsys___2 { ++ struct cgroup_subsys_state___2 * (*css_alloc)(struct cgroup_subsys_state___2 *); ++ int (*css_online)(struct cgroup_subsys_state___2 *); ++ void (*css_offline)(struct cgroup_subsys_state___2 *); ++ void (*css_released)(struct cgroup_subsys_state___2 *); ++ void (*css_free)(struct cgroup_subsys_state___2 *); ++ void (*css_reset)(struct cgroup_subsys_state___2 *); ++ void (*css_rstat_flush)(struct cgroup_subsys_state___2 *, int); ++ int (*css_extra_stat_show)(struct seq_file___2 *, struct cgroup_subsys_state___2 *); ++ int (*can_attach)(struct cgroup_taskset *); ++ void (*cancel_attach)(struct cgroup_taskset *); ++ void (*attach)(struct cgroup_taskset *); ++ void (*post_attach)(); ++ int (*can_fork)(struct task_struct___2 *); ++ void (*cancel_fork)(struct task_struct___2 *); ++ void (*fork)(struct task_struct___2 *); ++ void (*exit)(struct task_struct___2 *); ++ void (*release)(struct task_struct___2 *); ++ void (*bind)(struct cgroup_subsys_state___2 *); ++ bool early_init: 1; ++ bool 
implicit_on_dfl: 1; ++ bool threaded: 1; ++ bool broken_hierarchy: 1; ++ bool warned_broken_hierarchy: 1; ++ int id; ++ const char *name; ++ const char *legacy_name; ++ struct cgroup_root___2 *root; ++ struct idr css_idr; ++ struct list_head cfts; ++ struct cftype___2 *dfl_cftypes; ++ struct cftype___2 *legacy_cftypes; ++ unsigned int depends_on; ++}; ++ ++struct cgroup_rstat_cpu___2 { ++ struct u64_stats_sync bsync; ++ struct cgroup_base_stat bstat; ++ struct cgroup_base_stat last_bstat; ++ struct cgroup___2 *updated_children; ++ struct cgroup___2 *updated_next; ++}; ++ ++struct cgroup_root___2 { ++ struct kernfs_root___2 *kf_root; ++ unsigned int subsys_mask; ++ int hierarchy_id; ++ struct cgroup___2 cgrp; ++ int cgrp_ancestor_id_storage; ++ atomic_t nr_cgrps; ++ struct list_head root_list; ++ unsigned int flags; ++ struct idr cgroup_idr; ++ char release_agent_path[4096]; ++ char name[64]; ++}; ++ ++struct cftype___2 { ++ char name[64]; ++ long unsigned int private; ++ size_t max_write_len; ++ unsigned int flags; ++ unsigned int file_offset; ++ struct cgroup_subsys___2 *ss; ++ struct list_head node; ++ struct kernfs_ops___2 *kf_ops; ++ int (*open)(struct kernfs_open_file___2 *); ++ void (*release)(struct kernfs_open_file___2 *); ++ u64 (*read_u64)(struct cgroup_subsys_state___2 *, struct cftype___2 *); ++ s64 (*read_s64)(struct cgroup_subsys_state___2 *, struct cftype___2 *); ++ int (*seq_show)(struct seq_file___2 *, void *); ++ void * (*seq_start)(struct seq_file___2 *, loff_t *); ++ void * (*seq_next)(struct seq_file___2 *, void *, loff_t *); ++ void (*seq_stop)(struct seq_file___2 *, void *); ++ int (*write_u64)(struct cgroup_subsys_state___2 *, struct cftype___2 *, u64); ++ int (*write_s64)(struct cgroup_subsys_state___2 *, struct cftype___2 *, s64); ++ ssize_t (*write)(struct kernfs_open_file___2 *, char *, size_t, loff_t); ++}; ++ ++struct perf_cpu_context___2; ++ ++struct pmu___2 { ++ struct list_head entry; ++ struct module___2 *module; ++ struct device___2 *dev; ++ const struct attribute_group___2 **attr_groups; ++ const char *name; ++ int type; ++ int capabilities; ++ int *pmu_disable_count; ++ struct perf_cpu_context___2 *pmu_cpu_context; ++ atomic_t exclusive_cnt; ++ int task_ctx_nr; ++ int hrtimer_interval_ms; ++ unsigned int nr_addr_filters; ++ void (*pmu_enable)(struct pmu___2 *); ++ void (*pmu_disable)(struct pmu___2 *); ++ int (*event_init)(struct perf_event___2 *); ++ void (*event_mapped)(struct perf_event___2 *, struct mm_struct___2 *); ++ void (*event_unmapped)(struct perf_event___2 *, struct mm_struct___2 *); ++ int (*add)(struct perf_event___2 *, int); ++ void (*del)(struct perf_event___2 *, int); ++ void (*start)(struct perf_event___2 *, int); ++ void (*stop)(struct perf_event___2 *, int); ++ void (*read)(struct perf_event___2 *); ++ void (*start_txn)(struct pmu___2 *, unsigned int); ++ int (*commit_txn)(struct pmu___2 *); ++ void (*cancel_txn)(struct pmu___2 *); ++ int (*event_idx)(struct perf_event___2 *); ++ void (*sched_task)(struct perf_event_context___2 *, bool); ++ size_t task_ctx_size; ++ void * (*setup_aux)(struct perf_event___2 *, void **, int, bool); ++ void (*free_aux)(void *); ++ int (*addr_filters_validate)(struct list_head *); ++ void (*addr_filters_sync)(struct perf_event___2 *); ++ int (*filter_match)(struct perf_event___2 *); ++ int (*check_period)(struct perf_event___2 *, u64); ++}; ++ ++struct perf_cpu_context___2 { ++ struct perf_event_context___2 ctx; ++ struct perf_event_context___2 *task_ctx; ++ int active_oncpu; ++ int exclusive; ++ 
raw_spinlock_t hrtimer_lock; ++ struct hrtimer hrtimer; ++ ktime_t hrtimer_interval; ++ unsigned int hrtimer_active; ++ struct perf_cgroup___2 *cgrp; ++ struct list_head cgrp_cpuctx_entry; ++ struct list_head sched_cb_entry; ++ int sched_cb_usage; ++ int online; ++}; ++ ++enum perf_addr_filter_action_t { ++ PERF_ADDR_FILTER_ACTION_STOP = 0, ++ PERF_ADDR_FILTER_ACTION_START = 1, ++ PERF_ADDR_FILTER_ACTION_FILTER = 2, ++}; ++ ++struct perf_addr_filter { ++ struct list_head entry; ++ struct path___2 path; ++ long unsigned int offset; ++ long unsigned int size; ++ enum perf_addr_filter_action_t action; ++}; ++ ++struct swevent_hlist { ++ struct hlist_head heads[256]; ++ struct callback_head callback_head; ++}; ++ ++struct pmu_event_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++ ++struct ring_buffer___2 { ++ atomic_t refcount; ++ struct callback_head callback_head; ++ struct work_struct work; ++ int page_order; ++ int nr_pages; ++ int overwrite; ++ int paused; ++ atomic_t poll; ++ local_t head; ++ local_t nest; ++ local_t events; ++ local_t wakeup; ++ local_t lost; ++ long int watermark; ++ long int aux_watermark; ++ spinlock_t event_lock; ++ struct list_head event_list; ++ atomic_t mmap_count; ++ long unsigned int mmap_locked; ++ struct user_struct___2 *mmap_user; ++ long int aux_head; ++ local_t aux_nest; ++ long int aux_wakeup; ++ long unsigned int aux_pgoff; ++ int aux_nr_pages; ++ int aux_overwrite; ++ atomic_t aux_mmap_count; ++ long unsigned int aux_mmap_locked; ++ void (*free_aux)(void *); ++ atomic_t aux_refcount; ++ void **aux_pages; ++ void *aux_priv; ++ struct perf_event_mmap_page *user_page; ++ void *data_pages[0]; ++}; ++ ++struct perf_cgroup___2 { ++ struct cgroup_subsys_state___2 css; ++ struct perf_cgroup_info *info; ++}; ++ ++struct perf_output_handle { ++ struct perf_event___2 *event; ++ struct ring_buffer___2 *rb; ++ long unsigned int wakeup; ++ long unsigned int size; ++ u64 aux_flags; ++ union { ++ void *addr; ++ long unsigned int head; ++ }; ++ int page; ++}; ++ ++struct bpf_perf_event_data_kern___2 { ++ bpf_user_pt_regs_t *regs; ++ struct perf_sample_data *data; ++ struct perf_event___2 *event; ++}; ++ ++struct perf_pmu_events_attr___2 { ++ struct device_attribute___2 attr; ++ u64 id; ++ const char *event_str; ++}; ++ ++struct trace_event_class___2 { ++ const char *system; ++ void *probe; ++ void *perf_probe; ++ int (*reg)(struct trace_event_call___2 *, enum trace_reg, void *); ++ int (*define_fields)(struct trace_event_call___2 *); ++ struct list_head * (*get_fields)(struct trace_event_call___2 *); ++ struct list_head fields; ++ int (*raw_init)(struct trace_event_call___2 *); ++}; ++ ++struct syscall_metadata___2 { ++ const char *name; ++ int syscall_nr; ++ int nb_args; ++ const char **types; ++ const char **args; ++ struct list_head enter_fields; ++ struct trace_event_call___2 *enter_event; ++ struct trace_event_call___2 *exit_event; ++}; ++ ++struct bpf_map___2; ++ ++struct bpf_map_ops___2 { ++ int (*map_alloc_check)(union bpf_attr *); ++ struct bpf_map___2 * (*map_alloc)(union bpf_attr *); ++ void (*map_release)(struct bpf_map___2 *, struct file___2 *); ++ void (*map_free)(struct bpf_map___2 *); ++ int (*map_get_next_key)(struct bpf_map___2 *, void *, void *); ++ void (*map_release_uref)(struct bpf_map___2 *); ++ void * (*map_lookup_elem_sys_only)(struct bpf_map___2 *, void *); ++ void * (*map_lookup_elem)(struct bpf_map___2 *, void *); ++ int (*map_update_elem)(struct bpf_map___2 *, void *, void *, u64); ++ int (*map_delete_elem)(struct bpf_map___2 
*, void *); ++ void * (*map_fd_get_ptr)(struct bpf_map___2 *, struct file___2 *, int); ++ void (*map_fd_put_ptr)(void *); ++ u32 (*map_gen_lookup)(struct bpf_map___2 *, struct bpf_insn *); ++ u32 (*map_fd_sys_lookup_elem)(void *); ++ void (*map_seq_show_elem)(struct bpf_map___2 *, void *, struct seq_file___2 *); ++ int (*map_check_btf)(const struct bpf_map___2 *, const struct btf_type *, const struct btf_type *); ++}; ++ ++struct bpf_map___2 { ++ const struct bpf_map_ops___2 *ops; ++ struct bpf_map___2 *inner_map_meta; ++ void *security; ++ enum bpf_map_type map_type; ++ u32 key_size; ++ u32 value_size; ++ u32 max_entries; ++ u32 map_flags; ++ u32 pages; ++ u32 id; ++ int numa_node; ++ u32 btf_key_type_id; ++ u32 btf_value_type_id; ++ struct btf *btf; ++ bool unpriv_array; ++ long: 56; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct user_struct___2 *user; ++ atomic_t refcnt; ++ atomic_t usercnt; ++ struct work_struct work; ++ char name[16]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct bpf_offloaded_map___2; ++ ++struct bpf_map_dev_ops___2 { ++ int (*map_get_next_key)(struct bpf_offloaded_map___2 *, void *, void *); ++ int (*map_lookup_elem)(struct bpf_offloaded_map___2 *, void *, void *); ++ int (*map_update_elem)(struct bpf_offloaded_map___2 *, void *, void *, u64); ++ int (*map_delete_elem)(struct bpf_offloaded_map___2 *, void *); ++}; ++ ++struct bpf_offloaded_map___2 { ++ struct bpf_map___2 map; ++ struct net_device___2 *netdev; ++ const struct bpf_map_dev_ops___2 *dev_ops; ++ void *dev_priv; ++ struct list_head offloads; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++typedef rx_handler_result_t rx_handler_func_t___2(struct sk_buff___2 **); ++ ++typedef struct { ++ struct net___2 *net; ++} possible_net_t___2; ++ ++struct net_device_ops___2; ++ ++struct ethtool_ops___2; ++ ++struct xfrmdev_ops___2; ++ ++struct tlsdev_ops___2; ++ ++struct header_ops___2; ++ ++struct netdev_rx_queue___2; ++ ++struct mini_Qdisc___2; ++ ++struct netdev_queue___2; ++ ++struct Qdisc___2; ++ ++struct rtnl_link_ops___2; ++ ++struct dcbnl_rtnl_ops___2; ++ ++struct net_device___2 { ++ char name[16]; ++ struct hlist_node name_hlist; ++ struct dev_ifalias *ifalias; ++ long unsigned int mem_end; ++ long unsigned int mem_start; ++ long unsigned int base_addr; ++ int irq; ++ long unsigned int state; ++ struct list_head dev_list; ++ struct list_head napi_list; ++ struct list_head unreg_list; ++ struct list_head close_list; ++ struct list_head ptype_all; ++ struct list_head ptype_specific; ++ struct { ++ struct list_head upper; ++ struct list_head lower; ++ } adj_list; ++ netdev_features_t features; ++ netdev_features_t hw_features; ++ netdev_features_t wanted_features; ++ netdev_features_t vlan_features; ++ netdev_features_t hw_enc_features; ++ netdev_features_t mpls_features; ++ netdev_features_t gso_partial_features; ++ int ifindex; ++ int group; ++ struct net_device_stats stats; ++ atomic_long_t rx_dropped; ++ atomic_long_t tx_dropped; ++ atomic_long_t rx_nohandler; ++ atomic_t carrier_up_count; ++ atomic_t carrier_down_count; ++ const struct net_device_ops___2 *netdev_ops; ++ const struct ethtool_ops___2 *ethtool_ops; ++ const struct switchdev_ops *switchdev_ops; ++ const struct l3mdev_ops *l3mdev_ops; ++ const struct ndisc_ops *ndisc_ops; ++ const struct xfrmdev_ops___2 *xfrmdev_ops; ++ const struct tlsdev_ops___2 *tlsdev_ops; ++ const struct header_ops___2 *header_ops; ++ unsigned int flags; ++ unsigned int priv_flags; ++ short unsigned int gflags; 
++ short unsigned int padded; ++ unsigned char operstate; ++ unsigned char link_mode; ++ unsigned char if_port; ++ unsigned char dma; ++ unsigned int mtu; ++ unsigned int min_mtu; ++ unsigned int max_mtu; ++ short unsigned int type; ++ short unsigned int hard_header_len; ++ unsigned char min_header_len; ++ short unsigned int needed_headroom; ++ short unsigned int needed_tailroom; ++ unsigned char perm_addr[32]; ++ unsigned char addr_assign_type; ++ unsigned char addr_len; ++ unsigned char upper_level; ++ unsigned char lower_level; ++ short unsigned int neigh_priv_len; ++ short unsigned int dev_id; ++ short unsigned int dev_port; ++ spinlock_t addr_list_lock; ++ unsigned char name_assign_type; ++ bool uc_promisc; ++ struct netdev_hw_addr_list uc; ++ struct netdev_hw_addr_list mc; ++ struct netdev_hw_addr_list dev_addrs; ++ struct kset___2 *queues_kset; ++ unsigned int promiscuity; ++ unsigned int allmulti; ++ struct vlan_info *vlan_info; ++ struct tipc_bearer *tipc_ptr; ++ struct in_device *ip_ptr; ++ struct inet6_dev *ip6_ptr; ++ struct wireless_dev *ieee80211_ptr; ++ struct wpan_dev *ieee802154_ptr; ++ unsigned char *dev_addr; ++ struct netdev_rx_queue___2 *_rx; ++ unsigned int num_rx_queues; ++ unsigned int real_num_rx_queues; ++ struct bpf_prog___2 *xdp_prog; ++ long unsigned int gro_flush_timeout; ++ rx_handler_func_t___2 *rx_handler; ++ void *rx_handler_data; ++ struct mini_Qdisc___2 *miniq_ingress; ++ struct netdev_queue___2 *ingress_queue; ++ struct nf_hook_entries *nf_hooks_ingress; ++ unsigned char broadcast[32]; ++ struct cpu_rmap *rx_cpu_rmap; ++ struct hlist_node index_hlist; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct netdev_queue___2 *_tx; ++ unsigned int num_tx_queues; ++ unsigned int real_num_tx_queues; ++ struct Qdisc___2 *qdisc; ++ struct hlist_head qdisc_hash[16]; ++ unsigned int tx_queue_len; ++ spinlock_t tx_global_lock; ++ int watchdog_timeo; ++ struct xps_dev_maps *xps_cpus_map; ++ struct xps_dev_maps *xps_rxqs_map; ++ struct mini_Qdisc___2 *miniq_egress; ++ struct timer_list watchdog_timer; ++ int *pcpu_refcnt; ++ struct list_head todo_list; ++ struct list_head link_watch_list; ++ enum { ++ NETREG_UNINITIALIZED___2 = 0, ++ NETREG_REGISTERED___2 = 1, ++ NETREG_UNREGISTERING___2 = 2, ++ NETREG_UNREGISTERED___2 = 3, ++ NETREG_RELEASED___2 = 4, ++ NETREG_DUMMY___2 = 5, ++ } reg_state: 8; ++ bool dismantle; ++ enum { ++ RTNL_LINK_INITIALIZED___2 = 0, ++ RTNL_LINK_INITIALIZING___2 = 1, ++ } rtnl_link_state: 16; ++ bool needs_free_netdev; ++ void (*priv_destructor)(struct net_device___2 *); ++ struct netpoll_info *npinfo; ++ possible_net_t___2 nd_net; ++ union { ++ void *ml_priv; ++ struct pcpu_lstats *lstats; ++ struct pcpu_sw_netstats *tstats; ++ struct pcpu_dstats *dstats; ++ struct pcpu_vstats *vstats; ++ }; ++ struct garp_port *garp_port; ++ struct mrp_port *mrp_port; ++ struct device___2 dev; ++ const struct attribute_group___2 *sysfs_groups[4]; ++ const struct attribute_group___2 *sysfs_rx_queue_group; ++ const struct rtnl_link_ops___2 *rtnl_link_ops; ++ unsigned int gso_max_size; ++ u16 gso_max_segs; ++ const struct dcbnl_rtnl_ops___2 *dcbnl_ops; ++ s16 num_tc; ++ struct netdev_tc_txq tc_to_txq[16]; ++ u8 prio_tc_map[16]; ++ unsigned int fcoe_ddp_xid; ++ struct netprio_map *priomap; ++ struct phy_device *phydev; ++ struct sfp_bus *sfp_bus; ++ struct lock_class_key *qdisc_tx_busylock; ++ struct lock_class_key *qdisc_running_key; ++ bool proto_down; ++ unsigned int wol_enabled: 1; ++ long unsigned int kabi_reserved1; ++ 
long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long: 64; ++}; ++ ++struct bpf_prog_ops___2 { ++ int (*test_run)(struct bpf_prog___2 *, const union bpf_attr *, union bpf_attr *); ++}; ++ ++struct bpf_verifier_ops___2 { ++ const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog___2 *); ++ bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog___2 *, struct bpf_insn_access_aux *); ++ int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog___2 *); ++ int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); ++ u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog___2 *, u32 *); ++}; ++ ++struct bpf_prog_offload___2 { ++ struct bpf_prog___2 *prog; ++ struct net_device___2 *netdev; ++ void *dev_priv; ++ struct list_head offloads; ++ bool dev_state; ++ const struct bpf_prog_offload_ops *dev_ops; ++ void *jited_image; ++ u32 jited_len; ++}; ++ ++struct bpf_prog_aux___2 { ++ atomic_t refcnt; ++ u32 used_map_cnt; ++ u32 max_ctx_offset; ++ u32 stack_depth; ++ u32 id; ++ u32 func_cnt; ++ bool offload_requested; ++ struct bpf_prog___2 **func; ++ void *jit_data; ++ struct latch_tree_node ksym_tnode; ++ struct list_head ksym_lnode; ++ const struct bpf_prog_ops___2 *ops; ++ struct bpf_map___2 **used_maps; ++ struct bpf_prog___2 *prog; ++ struct user_struct___2 *user; ++ u64 load_time; ++ struct bpf_map___2 *cgroup_storage; ++ char name[16]; ++ void *security; ++ struct bpf_prog_offload___2 *offload; ++ union { ++ struct work_struct work; ++ struct callback_head rcu; ++ }; ++}; ++ ++struct nf_bridge_info___2; ++ ++struct sk_buff___2 { ++ union { ++ struct { ++ struct sk_buff___2 *next; ++ struct sk_buff___2 *prev; ++ union { ++ struct net_device___2 *dev; ++ long unsigned int dev_scratch; ++ }; ++ }; ++ struct rb_node rbnode; ++ struct list_head list; ++ }; ++ union { ++ struct sock *sk; ++ int ip_defrag_offset; ++ }; ++ union { ++ ktime_t tstamp; ++ u64 skb_mstamp; ++ }; ++ char cb[48]; ++ union { ++ struct { ++ long unsigned int _skb_refdst; ++ void (*destructor)(struct sk_buff___2 *); ++ }; ++ struct list_head tcp_tsorted_anchor; ++ }; ++ struct sec_path *sp; ++ long unsigned int _nfct; ++ struct nf_bridge_info___2 *nf_bridge; ++ unsigned int len; ++ unsigned int data_len; ++ __u16 mac_len; ++ __u16 hdr_len; ++ __u16 queue_mapping; ++ __u8 __cloned_offset[0]; ++ __u8 cloned: 1; ++ __u8 nohdr: 1; ++ __u8 fclone: 2; ++ __u8 peeked: 1; ++ __u8 head_frag: 1; 
++ __u8 xmit_more: 1; ++ __u8 pfmemalloc: 1; ++ __u32 headers_start[0]; ++ __u8 __pkt_type_offset[0]; ++ __u8 pkt_type: 3; ++ __u8 ignore_df: 1; ++ __u8 nf_trace: 1; ++ __u8 ip_summed: 2; ++ __u8 ooo_okay: 1; ++ __u8 l4_hash: 1; ++ __u8 sw_hash: 1; ++ __u8 wifi_acked_valid: 1; ++ __u8 wifi_acked: 1; ++ __u8 no_fcs: 1; ++ __u8 encapsulation: 1; ++ __u8 encap_hdr_csum: 1; ++ __u8 csum_valid: 1; ++ __u8 csum_complete_sw: 1; ++ __u8 csum_level: 2; ++ __u8 csum_not_inet: 1; ++ __u8 dst_pending_confirm: 1; ++ __u8 ndisc_nodetype: 2; ++ __u8 ipvs_property: 1; ++ __u8 inner_protocol_type: 1; ++ __u8 remcsum_offload: 1; ++ __u8 offload_fwd_mark: 1; ++ __u8 offload_mr_fwd_mark: 1; ++ __u8 tc_skip_classify: 1; ++ __u8 tc_at_ingress: 1; ++ __u8 tc_redirected: 1; ++ __u8 tc_from_ingress: 1; ++ __u8 decrypted: 1; ++ __u16 tc_index; ++ union { ++ __wsum csum; ++ struct { ++ __u16 csum_start; ++ __u16 csum_offset; ++ }; ++ }; ++ __u32 priority; ++ int skb_iif; ++ __u32 hash; ++ __be16 vlan_proto; ++ __u16 vlan_tci; ++ union { ++ unsigned int napi_id; ++ unsigned int sender_cpu; ++ }; ++ __u32 secmark; ++ union { ++ __u32 mark; ++ __u32 reserved_tailroom; ++ }; ++ union { ++ __be16 inner_protocol; ++ __u8 inner_ipproto; ++ }; ++ __u16 inner_transport_header; ++ __u16 inner_network_header; ++ __u16 inner_mac_header; ++ __be16 protocol; ++ __u16 transport_header; ++ __u16 network_header; ++ __u16 mac_header; ++ __u32 headers_end[0]; ++ sk_buff_data_t tail; ++ sk_buff_data_t end; ++ unsigned char *head; ++ unsigned char *data; ++ unsigned int truesize; ++ refcount_t users; ++}; ++ ++struct pipe_buf_operations___2; ++ ++struct pipe_buffer___2 { ++ struct page___2 *page; ++ unsigned int offset; ++ unsigned int len; ++ const struct pipe_buf_operations___2 *ops; ++ unsigned int flags; ++ long unsigned int private; ++}; ++ ++struct pipe_buf_operations___2 { ++ int can_merge; ++ int (*confirm)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *); ++ void (*release)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *); ++ int (*steal)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *); ++ bool (*get)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *); ++}; ++ ++struct nf_bridge_info___2 { ++ refcount_t use; ++ enum { ++ BRNF_PROTO_UNCHANGED___2 = 0, ++ BRNF_PROTO_8021Q___2 = 1, ++ BRNF_PROTO_PPPOE___2 = 2, ++ } orig_proto: 8; ++ u8 pkt_otherhost: 1; ++ u8 in_prerouting: 1; ++ u8 bridged_dnat: 1; ++ __u16 frag_max_size; ++ struct net_device___2 *physindev; ++ struct net_device___2 *physoutdev; ++ union { ++ __be32 ipv4_daddr; ++ struct in6_addr ipv6_daddr; ++ char neigh_header[8]; ++ }; ++}; ++ ++struct ethtool_ops___2 { ++ int (*get_settings)(struct net_device___2 *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device___2 *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device___2 *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device___2 *); ++ void (*get_regs)(struct net_device___2 *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device___2 *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device___2 *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device___2 *); ++ void (*set_msglevel)(struct net_device___2 *, u32); ++ int (*nway_reset)(struct net_device___2 *); ++ u32 (*get_link)(struct net_device___2 *); ++ int (*get_eeprom_len)(struct net_device___2 *); ++ int (*get_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *); ++ int 
(*get_coalesce)(struct net_device___2 *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device___2 *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device___2 *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device___2 *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device___2 *, struct ethtool_pauseparam *); ++ int (*set_pauseparam)(struct net_device___2 *, struct ethtool_pauseparam *); ++ void (*self_test)(struct net_device___2 *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device___2 *, u32, u8 *); ++ int (*set_phys_id)(struct net_device___2 *, enum ethtool_phys_id_state); ++ void (*get_ethtool_stats)(struct net_device___2 *, struct ethtool_stats *, u64 *); ++ int (*begin)(struct net_device___2 *); ++ void (*complete)(struct net_device___2 *); ++ u32 (*get_priv_flags)(struct net_device___2 *); ++ int (*set_priv_flags)(struct net_device___2 *, u32); ++ int (*get_sset_count)(struct net_device___2 *, int); ++ int (*get_rxnfc)(struct net_device___2 *, struct ethtool_rxnfc *, u32 *); ++ int (*set_rxnfc)(struct net_device___2 *, struct ethtool_rxnfc *); ++ int (*flash_device)(struct net_device___2 *, struct ethtool_flash *); ++ int (*reset)(struct net_device___2 *, u32 *); ++ u32 (*get_rxfh_key_size)(struct net_device___2 *); ++ u32 (*get_rxfh_indir_size)(struct net_device___2 *); ++ int (*get_rxfh)(struct net_device___2 *, u32 *, u8 *, u8 *); ++ int (*set_rxfh)(struct net_device___2 *, const u32 *, const u8 *, const u8); ++ int (*get_rxfh_context)(struct net_device___2 *, u32 *, u8 *, u8 *, u32); ++ int (*set_rxfh_context)(struct net_device___2 *, const u32 *, const u8 *, const u8, u32 *, bool); ++ void (*get_channels)(struct net_device___2 *, struct ethtool_channels *); ++ int (*set_channels)(struct net_device___2 *, struct ethtool_channels *); ++ int (*get_dump_flag)(struct net_device___2 *, struct ethtool_dump *); ++ int (*get_dump_data)(struct net_device___2 *, struct ethtool_dump *, void *); ++ int (*set_dump)(struct net_device___2 *, struct ethtool_dump *); ++ int (*get_ts_info)(struct net_device___2 *, struct ethtool_ts_info *); ++ int (*get_module_info)(struct net_device___2 *, struct ethtool_modinfo *); ++ int (*get_module_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *); ++ int (*get_eee)(struct net_device___2 *, struct ethtool_eee *); ++ int (*set_eee)(struct net_device___2 *, struct ethtool_eee *); ++ int (*get_tunable)(struct net_device___2 *, const struct ethtool_tunable *, void *); ++ int (*set_tunable)(struct net_device___2 *, const struct ethtool_tunable *, const void *); ++ int (*get_per_queue_coalesce)(struct net_device___2 *, u32, struct ethtool_coalesce *); ++ int (*set_per_queue_coalesce)(struct net_device___2 *, u32, struct ethtool_coalesce *); ++ int (*get_link_ksettings)(struct net_device___2 *, struct ethtool_link_ksettings *); ++ int (*set_link_ksettings)(struct net_device___2 *, const struct ethtool_link_ksettings *); ++ int (*get_fecparam)(struct net_device___2 *, struct ethtool_fecparam *); ++ int (*set_fecparam)(struct net_device___2 *, struct ethtool_fecparam *); ++ void (*get_ethtool_phy_stats)(struct net_device___2 *, struct ethtool_stats *, u64 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long 
unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long unsigned int kabi_reserved31; ++ long unsigned int kabi_reserved32; ++}; ++ ++struct inet_frag_queue___2; ++ ++struct inet_frags___2 { ++ unsigned int qsize; ++ void (*constructor)(struct inet_frag_queue___2 *, const void *); ++ void (*destructor)(struct inet_frag_queue___2 *); ++ void (*frag_expire)(struct timer_list *); ++ struct kmem_cache *frags_cachep; ++ const char *frags_cache_name; ++ struct rhashtable_params rhash_params; ++}; ++ ++struct inet_frag_queue___2 { ++ struct rhash_head node; ++ union { ++ struct frag_v4_compare_key v4; ++ struct frag_v6_compare_key v6; ++ } key; ++ struct timer_list timer; ++ spinlock_t lock; ++ refcount_t refcnt; ++ struct sk_buff___2 *fragments; ++ struct rb_root rb_fragments; ++ struct sk_buff___2 *fragments_tail; ++ struct sk_buff___2 *last_run_head; ++ ktime_t stamp; ++ int len; ++ int meat; ++ __u8 flags; ++ u16 max_size; ++ struct netns_frags___2 *net; ++ struct callback_head rcu; ++}; ++ ++struct pernet_operations___2 { ++ struct list_head list; ++ int (*init)(struct net___2 *); ++ void (*exit)(struct net___2 *); ++ void (*exit_batch)(struct list_head *); ++ unsigned int *id; ++ size_t size; ++}; ++ ++struct dcbnl_rtnl_ops___2 { ++ int (*ieee_getets)(struct net_device___2 *, struct ieee_ets *); ++ int (*ieee_setets)(struct net_device___2 *, struct ieee_ets *); ++ int (*ieee_getmaxrate)(struct net_device___2 *, struct ieee_maxrate *); ++ int (*ieee_setmaxrate)(struct net_device___2 *, struct ieee_maxrate *); ++ int (*ieee_getqcn)(struct net_device___2 *, struct ieee_qcn *); ++ int (*ieee_setqcn)(struct net_device___2 *, struct ieee_qcn *); ++ int (*ieee_getqcnstats)(struct net_device___2 *, struct ieee_qcn_stats *); ++ int (*ieee_getpfc)(struct net_device___2 *, struct ieee_pfc *); ++ int (*ieee_setpfc)(struct net_device___2 *, struct ieee_pfc *); ++ int (*ieee_getapp)(struct net_device___2 *, struct dcb_app *); ++ int (*ieee_setapp)(struct net_device___2 *, struct dcb_app *); ++ int (*ieee_delapp)(struct net_device___2 *, struct dcb_app *); ++ int (*ieee_peer_getets)(struct net_device___2 *, struct ieee_ets *); ++ int (*ieee_peer_getpfc)(struct net_device___2 *, struct ieee_pfc *); ++ u8 (*getstate)(struct net_device___2 *); ++ u8 (*setstate)(struct net_device___2 *, u8); ++ void (*getpermhwaddr)(struct net_device___2 *, u8 *); ++ void (*setpgtccfgtx)(struct net_device___2 *, int, u8, u8, u8, u8); ++ void (*setpgbwgcfgtx)(struct net_device___2 *, int, u8); ++ void (*setpgtccfgrx)(struct net_device___2 *, int, u8, u8, u8, u8); ++ void (*setpgbwgcfgrx)(struct net_device___2 *, int, u8); ++ void (*getpgtccfgtx)(struct net_device___2 *, int, u8 *, u8 *, u8 *, u8 *); ++ void (*getpgbwgcfgtx)(struct net_device___2 *, int, u8 *); ++ 
void (*getpgtccfgrx)(struct net_device___2 *, int, u8 *, u8 *, u8 *, u8 *); ++ void (*getpgbwgcfgrx)(struct net_device___2 *, int, u8 *); ++ void (*setpfccfg)(struct net_device___2 *, int, u8); ++ void (*getpfccfg)(struct net_device___2 *, int, u8 *); ++ u8 (*setall)(struct net_device___2 *); ++ u8 (*getcap)(struct net_device___2 *, int, u8 *); ++ int (*getnumtcs)(struct net_device___2 *, int, u8 *); ++ int (*setnumtcs)(struct net_device___2 *, int, u8); ++ u8 (*getpfcstate)(struct net_device___2 *); ++ void (*setpfcstate)(struct net_device___2 *, u8); ++ void (*getbcncfg)(struct net_device___2 *, int, u32 *); ++ void (*setbcncfg)(struct net_device___2 *, int, u32); ++ void (*getbcnrp)(struct net_device___2 *, int, u8 *); ++ void (*setbcnrp)(struct net_device___2 *, int, u8); ++ int (*setapp)(struct net_device___2 *, u8, u16, u8); ++ int (*getapp)(struct net_device___2 *, u8, u16); ++ u8 (*getfeatcfg)(struct net_device___2 *, int, u8 *); ++ u8 (*setfeatcfg)(struct net_device___2 *, int, u8); ++ u8 (*getdcbx)(struct net_device___2 *); ++ u8 (*setdcbx)(struct net_device___2 *, u8); ++ int (*peer_getappinfo)(struct net_device___2 *, struct dcb_peer_app_info *, u16 *); ++ int (*peer_getapptable)(struct net_device___2 *, struct dcb_app *); ++ int (*cee_peer_getpg)(struct net_device___2 *, struct cee_pg *); ++ int (*cee_peer_getpfc)(struct net_device___2 *, struct cee_pfc *); ++ int (*dcbnl_getbuffer)(struct net_device___2 *, struct dcbnl_buffer *); ++ int (*dcbnl_setbuffer)(struct net_device___2 *, struct dcbnl_buffer *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++}; ++ ++struct xdp_rxq_info___2 { ++ struct net_device___2 *dev; ++ u32 queue_index; ++ u32 reg_state; ++ struct xdp_mem_info mem; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct xdp_frame___2 { ++ void *data; ++ u16 len; ++ u16 headroom; ++ u16 metasize; ++ struct xdp_mem_info mem; ++ struct net_device___2 *dev_rx; ++}; ++ ++struct netlink_callback___2 { ++ struct sk_buff___2 *skb; ++ const struct nlmsghdr *nlh; ++ int (*dump)(struct sk_buff___2 *, struct netlink_callback___2 *); ++ int (*done)(struct netlink_callback___2 *); ++ void *data; ++ struct module___2 *module; ++ u16 family; ++ u16 min_dump_alloc; ++ unsigned int prev_seq; ++ unsigned int seq; ++ long int args[6]; ++}; ++ ++struct header_ops___2 { ++ int (*create)(struct sk_buff___2 *, struct net_device___2 *, short unsigned int, const void *, const void *, unsigned int); ++ int (*parse)(const struct sk_buff___2 *, unsigned char *); ++ int (*cache)(const struct neighbour *, struct hh_cache *, __be16); ++ void (*cache_update)(struct hh_cache *, const struct net_device___2 *, const unsigned char *); ++ bool (*validate)(const char *, unsigned int); ++ long unsigned int kabi_reserved1; ++ 
long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++}; ++ ++struct napi_struct___2 { ++ struct list_head poll_list; ++ long unsigned int state; ++ int weight; ++ long unsigned int gro_bitmask; ++ int (*poll)(struct napi_struct___2 *, int); ++ int poll_owner; ++ struct net_device___2 *dev; ++ struct gro_list gro_hash[8]; ++ struct sk_buff___2 *skb; ++ struct hrtimer timer; ++ struct list_head dev_list; ++ struct hlist_node napi_hash_node; ++ unsigned int napi_id; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct netdev_queue___2 { ++ struct net_device___2 *dev; ++ struct Qdisc___2 *qdisc; ++ struct Qdisc___2 *qdisc_sleeping; ++ struct kobject___3 kobj; ++ int numa_node; ++ long unsigned int tx_maxrate; ++ long unsigned int trans_timeout; ++ struct net_device___2 *sb_dev; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t _xmit_lock; ++ int xmit_lock_owner; ++ long unsigned int trans_start; ++ long unsigned int state; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct dql dql; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct qdisc_skb_head___2 { ++ struct sk_buff___2 *head; ++ struct sk_buff___2 *tail; ++ union { ++ u32 qlen; ++ atomic_t atomic_qlen; ++ }; ++ spinlock_t lock; ++}; ++ ++struct Qdisc_ops___2; ++ ++struct Qdisc___2 { ++ int (*enqueue)(struct sk_buff___2 *, struct Qdisc___2 *, struct sk_buff___2 **); ++ struct sk_buff___2 * (*dequeue)(struct Qdisc___2 *); ++ unsigned int flags; ++ u32 limit; ++ const struct Qdisc_ops___2 *ops; ++ struct qdisc_size_table *stab; ++ struct hlist_node hash; ++ u32 handle; ++ u32 parent; ++ struct netdev_queue___2 *dev_queue; ++ struct net_rate_estimator *rate_est; ++ struct gnet_stats_basic_cpu *cpu_bstats; ++ struct gnet_stats_queue *cpu_qstats; ++ int padded; ++ refcount_t refcnt; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sk_buff_head___2 gso_skb; ++ struct qdisc_skb_head___2 q; ++ struct gnet_stats_basic_packed bstats; ++ seqcount_t running; ++ struct gnet_stats_queue qstats; ++ long unsigned int state; ++ struct Qdisc___2 *next_sched; ++ struct sk_buff_head___2 skb_bad_txq; ++ spinlock_t busylock; ++ spinlock_t seqlock; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct netdev_rx_queue___2 { ++ struct rps_map *rps_map; ++ struct rps_dev_flow_table *rps_flow_table; ++ struct kobject___3 kobj; ++ struct net_device___2 *dev; ++ long: 64; ++ struct xdp_rxq_info___2 xdp_rxq; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++typedef u16 (*select_queue_fallback_t___2)(struct net_device___2 *, struct sk_buff___2 *, struct net_device___2 *); ++ ++struct netdev_bpf___2 { ++ enum bpf_netdev_command command; ++ union { ++ struct { ++ u32 flags; ++ struct bpf_prog___2 *prog; ++ struct 
netlink_ext_ack *extack; ++ }; ++ struct { ++ u32 prog_id; ++ u32 prog_flags; ++ }; ++ struct { ++ struct bpf_prog___2 *prog; ++ const struct bpf_prog_offload_ops *ops; ++ } verifier; ++ struct { ++ struct bpf_prog___2 *prog; ++ } offload; ++ struct { ++ struct bpf_offloaded_map___2 *offmap; ++ }; ++ struct { ++ struct xdp_umem *umem; ++ u16 queue_id; ++ } xsk; ++ }; ++}; ++ ++struct xfrmdev_ops___2 { ++ int (*xdo_dev_state_add)(struct xfrm_state *); ++ void (*xdo_dev_state_delete)(struct xfrm_state *); ++ void (*xdo_dev_state_free)(struct xfrm_state *); ++ bool (*xdo_dev_offload_ok)(struct sk_buff___2 *, struct xfrm_state *); ++ void (*xdo_dev_state_advance_esn)(struct xfrm_state *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct tlsdev_ops___2 { ++ int (*tls_dev_add)(struct net_device___2 *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32); ++ void (*tls_dev_del)(struct net_device___2 *, struct tls_context *, enum tls_offload_ctx_dir); ++ void (*tls_dev_resync_rx)(struct net_device___2 *, struct sock *, u32, u64); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++}; ++ ++struct net_device_ops___2 { ++ int (*ndo_init)(struct net_device___2 *); ++ void (*ndo_uninit)(struct net_device___2 *); ++ int (*ndo_open)(struct net_device___2 *); ++ int (*ndo_stop)(struct net_device___2 *); ++ netdev_tx_t (*ndo_start_xmit)(struct sk_buff___2 *, struct net_device___2 *); ++ netdev_features_t (*ndo_features_check)(struct sk_buff___2 *, struct net_device___2 *, netdev_features_t); ++ u16 (*ndo_select_queue)(struct net_device___2 *, struct sk_buff___2 *, struct net_device___2 *, select_queue_fallback_t___2); ++ void (*ndo_change_rx_flags)(struct net_device___2 *, int); ++ void (*ndo_set_rx_mode)(struct net_device___2 *); ++ int (*ndo_set_mac_address)(struct net_device___2 *, void *); ++ int (*ndo_validate_addr)(struct net_device___2 *); ++ int (*ndo_do_ioctl)(struct net_device___2 *, struct ifreq *, int); ++ int (*ndo_set_config)(struct net_device___2 *, struct ifmap *); ++ int (*ndo_change_mtu)(struct net_device___2 *, int); ++ int (*ndo_neigh_setup)(struct net_device___2 *, struct neigh_parms *); ++ void (*ndo_tx_timeout)(struct net_device___2 *); ++ void (*ndo_get_stats64)(struct net_device___2 *, struct rtnl_link_stats64 *); ++ bool (*ndo_has_offload_stats)(const struct net_device___2 *, int); ++ int (*ndo_get_offload_stats)(int, const struct net_device___2 *, void *); ++ struct net_device_stats * (*ndo_get_stats)(struct net_device___2 *); ++ int (*ndo_vlan_rx_add_vid)(struct net_device___2 *, __be16, u16); ++ int (*ndo_vlan_rx_kill_vid)(struct net_device___2 *, __be16, u16); ++ void (*ndo_poll_controller)(struct net_device___2 *); ++ int (*ndo_netpoll_setup)(struct net_device___2 *, struct netpoll_info *); ++ void (*ndo_netpoll_cleanup)(struct net_device___2 *); ++ int (*ndo_set_vf_mac)(struct net_device___2 *, int, u8 *); ++ int (*ndo_set_vf_vlan)(struct net_device___2 *, int, u16, u8, __be16); ++ int (*ndo_set_vf_rate)(struct net_device___2 *, int, int, int); ++ int (*ndo_set_vf_spoofchk)(struct net_device___2 *, int, bool); ++ int 
(*ndo_set_vf_trust)(struct net_device___2 *, int, bool); ++ int (*ndo_get_vf_config)(struct net_device___2 *, int, struct ifla_vf_info *); ++ int (*ndo_set_vf_link_state)(struct net_device___2 *, int, int); ++ int (*ndo_get_vf_stats)(struct net_device___2 *, int, struct ifla_vf_stats *); ++ int (*ndo_set_vf_port)(struct net_device___2 *, int, struct nlattr **); ++ int (*ndo_get_vf_port)(struct net_device___2 *, int, struct sk_buff___2 *); ++ int (*ndo_set_vf_guid)(struct net_device___2 *, int, u64, int); ++ int (*ndo_set_vf_rss_query_en)(struct net_device___2 *, int, bool); ++ int (*ndo_setup_tc)(struct net_device___2 *, enum tc_setup_type, void *); ++ int (*ndo_fcoe_enable)(struct net_device___2 *); ++ int (*ndo_fcoe_disable)(struct net_device___2 *); ++ int (*ndo_fcoe_ddp_setup)(struct net_device___2 *, u16, struct scatterlist *, unsigned int); ++ int (*ndo_fcoe_ddp_done)(struct net_device___2 *, u16); ++ int (*ndo_fcoe_ddp_target)(struct net_device___2 *, u16, struct scatterlist *, unsigned int); ++ int (*ndo_fcoe_get_hbainfo)(struct net_device___2 *, struct netdev_fcoe_hbainfo *); ++ int (*ndo_fcoe_get_wwn)(struct net_device___2 *, u64 *, int); ++ int (*ndo_rx_flow_steer)(struct net_device___2 *, const struct sk_buff___2 *, u16, u32); ++ int (*ndo_add_slave)(struct net_device___2 *, struct net_device___2 *, struct netlink_ext_ack *); ++ int (*ndo_del_slave)(struct net_device___2 *, struct net_device___2 *); ++ netdev_features_t (*ndo_fix_features)(struct net_device___2 *, netdev_features_t); ++ int (*ndo_set_features)(struct net_device___2 *, netdev_features_t); ++ int (*ndo_neigh_construct)(struct net_device___2 *, struct neighbour *); ++ void (*ndo_neigh_destroy)(struct net_device___2 *, struct neighbour *); ++ int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device___2 *, const unsigned char *, u16, u16); ++ int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device___2 *, const unsigned char *, u16); ++ int (*ndo_fdb_dump)(struct sk_buff___2 *, struct netlink_callback___2 *, struct net_device___2 *, struct net_device___2 *, int *); ++ int (*ndo_bridge_setlink)(struct net_device___2 *, struct nlmsghdr *, u16); ++ int (*ndo_bridge_getlink)(struct sk_buff___2 *, u32, u32, struct net_device___2 *, u32, int); ++ int (*ndo_bridge_dellink)(struct net_device___2 *, struct nlmsghdr *, u16); ++ int (*ndo_change_carrier)(struct net_device___2 *, bool); ++ int (*ndo_get_phys_port_id)(struct net_device___2 *, struct netdev_phys_item_id *); ++ int (*ndo_get_phys_port_name)(struct net_device___2 *, char *, size_t); ++ void (*ndo_udp_tunnel_add)(struct net_device___2 *, struct udp_tunnel_info *); ++ void (*ndo_udp_tunnel_del)(struct net_device___2 *, struct udp_tunnel_info *); ++ void * (*ndo_dfwd_add_station)(struct net_device___2 *, struct net_device___2 *); ++ void (*ndo_dfwd_del_station)(struct net_device___2 *, void *); ++ int (*ndo_get_lock_subclass)(struct net_device___2 *); ++ int (*ndo_set_tx_maxrate)(struct net_device___2 *, int, u32); ++ int (*ndo_get_iflink)(const struct net_device___2 *); ++ int (*ndo_change_proto_down)(struct net_device___2 *, bool); ++ int (*ndo_fill_metadata_dst)(struct net_device___2 *, struct sk_buff___2 *); ++ void (*ndo_set_rx_headroom)(struct net_device___2 *, int); ++ int (*ndo_bpf)(struct net_device___2 *, struct netdev_bpf___2 *); ++ int (*ndo_xdp_xmit)(struct net_device___2 *, int, struct xdp_frame___2 **, u32); ++ int (*ndo_xsk_async_xmit)(struct net_device___2 *, u32); ++ long unsigned int kabi_reserved1; ++ long unsigned int 
kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++ long unsigned int kabi_reserved16; ++ long unsigned int kabi_reserved17; ++ long unsigned int kabi_reserved18; ++ long unsigned int kabi_reserved19; ++ long unsigned int kabi_reserved20; ++ long unsigned int kabi_reserved21; ++ long unsigned int kabi_reserved22; ++ long unsigned int kabi_reserved23; ++ long unsigned int kabi_reserved24; ++ long unsigned int kabi_reserved25; ++ long unsigned int kabi_reserved26; ++ long unsigned int kabi_reserved27; ++ long unsigned int kabi_reserved28; ++ long unsigned int kabi_reserved29; ++ long unsigned int kabi_reserved30; ++ long unsigned int kabi_reserved31; ++ long unsigned int kabi_reserved32; ++ long unsigned int kabi_reserved33; ++ long unsigned int kabi_reserved34; ++ long unsigned int kabi_reserved35; ++ long unsigned int kabi_reserved36; ++ long unsigned int kabi_reserved37; ++ long unsigned int kabi_reserved38; ++ long unsigned int kabi_reserved39; ++ long unsigned int kabi_reserved40; ++ long unsigned int kabi_reserved41; ++ long unsigned int kabi_reserved42; ++ long unsigned int kabi_reserved43; ++ long unsigned int kabi_reserved44; ++ long unsigned int kabi_reserved45; ++ long unsigned int kabi_reserved46; ++ long unsigned int kabi_reserved47; ++}; ++ ++struct tcf_proto___2; ++ ++struct mini_Qdisc___2 { ++ struct tcf_proto___2 *filter_list; ++ struct gnet_stats_basic_cpu *cpu_bstats; ++ struct gnet_stats_queue *cpu_qstats; ++ struct callback_head rcu; ++}; ++ ++struct rtnl_link_ops___2 { ++ struct list_head list; ++ const char *kind; ++ size_t priv_size; ++ void (*setup)(struct net_device___2 *); ++ unsigned int maxtype; ++ const struct nla_policy *policy; ++ int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ int (*newlink)(struct net___2 *, struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ int (*changelink)(struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ void (*dellink)(struct net_device___2 *, struct list_head *); ++ size_t (*get_size)(const struct net_device___2 *); ++ int (*fill_info)(struct sk_buff___2 *, const struct net_device___2 *); ++ size_t (*get_xstats_size)(const struct net_device___2 *); ++ int (*fill_xstats)(struct sk_buff___2 *, const struct net_device___2 *); ++ unsigned int (*get_num_tx_queues)(); ++ unsigned int (*get_num_rx_queues)(); ++ unsigned int slave_maxtype; ++ const struct nla_policy *slave_policy; ++ int (*slave_changelink)(struct net_device___2 *, struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); ++ size_t (*get_slave_size)(const struct net_device___2 *, const struct net_device___2 *); ++ int (*fill_slave_info)(struct sk_buff___2 *, const struct net_device___2 *, const struct net_device___2 *); ++ struct net___2 * (*get_link_net)(const struct net_device___2 *); ++ size_t (*get_linkxstats_size)(const struct net_device___2 *, int); ++ int (*fill_linkxstats)(struct sk_buff___2 *, const struct net_device___2 *, int *, int); ++ long unsigned int kabi_reserved1; ++ long 
unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++ long unsigned int kabi_reserved9; ++ long unsigned int kabi_reserved10; ++ long unsigned int kabi_reserved11; ++ long unsigned int kabi_reserved12; ++ long unsigned int kabi_reserved13; ++ long unsigned int kabi_reserved14; ++ long unsigned int kabi_reserved15; ++}; ++ ++struct softnet_data___2 { ++ struct list_head poll_list; ++ struct sk_buff_head___2 process_queue; ++ unsigned int processed; ++ unsigned int time_squeeze; ++ unsigned int received_rps; ++ struct softnet_data___2 *rps_ipi_list; ++ struct sd_flow_limit *flow_limit; ++ struct Qdisc___2 *output_queue; ++ struct Qdisc___2 **output_queue_tailp; ++ struct sk_buff___2 *completion_queue; ++ struct sk_buff_head___2 xfrm_backlog; ++ struct { ++ u16 recursion; ++ u8 more; ++ } xmit; ++ int: 32; ++ unsigned int input_queue_head; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ call_single_data_t csd; ++ struct softnet_data___2 *rps_ipi_next; ++ unsigned int cpu; ++ unsigned int input_queue_tail; ++ unsigned int dropped; ++ struct sk_buff_head___2 input_pkt_queue; ++ struct napi_struct___2 backlog; ++ long: 64; ++}; ++ ++struct gnet_dump___2 { ++ spinlock_t *lock; ++ struct sk_buff___2 *skb; ++ struct nlattr *tail; ++ int compat_tc_stats; ++ int compat_xstats; ++ int padattr; ++ void *xstats; ++ int xstats_len; ++ struct tc_stats tc_stats; ++}; ++ ++struct Qdisc_class_ops___2; ++ ++struct Qdisc_ops___2 { ++ struct Qdisc_ops___2 *next; ++ const struct Qdisc_class_ops___2 *cl_ops; ++ char id[16]; ++ int priv_size; ++ unsigned int static_flags; ++ int (*enqueue)(struct sk_buff___2 *, struct Qdisc___2 *, struct sk_buff___2 **); ++ struct sk_buff___2 * (*dequeue)(struct Qdisc___2 *); ++ struct sk_buff___2 * (*peek)(struct Qdisc___2 *); ++ int (*init)(struct Qdisc___2 *, struct nlattr *, struct netlink_ext_ack *); ++ void (*reset)(struct Qdisc___2 *); ++ void (*destroy)(struct Qdisc___2 *); ++ int (*change)(struct Qdisc___2 *, struct nlattr *, struct netlink_ext_ack *); ++ void (*attach)(struct Qdisc___2 *); ++ int (*change_tx_queue_len)(struct Qdisc___2 *, unsigned int); ++ int (*dump)(struct Qdisc___2 *, struct sk_buff___2 *); ++ int (*dump_stats)(struct Qdisc___2 *, struct gnet_dump___2 *); ++ void (*ingress_block_set)(struct Qdisc___2 *, u32); ++ void (*egress_block_set)(struct Qdisc___2 *, u32); ++ u32 (*ingress_block_get)(struct Qdisc___2 *); ++ u32 (*egress_block_get)(struct Qdisc___2 *); ++ struct module___2 *owner; ++}; ++ ++struct tcf_block___2; ++ ++struct Qdisc_class_ops___2 { ++ struct netdev_queue___2 * (*select_queue)(struct Qdisc___2 *, struct tcmsg *); ++ int (*graft)(struct Qdisc___2 *, long unsigned int, struct Qdisc___2 *, struct Qdisc___2 **, struct netlink_ext_ack *); ++ struct Qdisc___2 * (*leaf)(struct Qdisc___2 *, long unsigned int); ++ void (*qlen_notify)(struct Qdisc___2 *, long unsigned int); ++ long unsigned int (*find)(struct Qdisc___2 *, u32); ++ int (*change)(struct Qdisc___2 *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); ++ int (*delete)(struct Qdisc___2 *, long unsigned int); ++ void (*walk)(struct Qdisc___2 *, struct qdisc_walker *); ++ struct tcf_block___2 * (*tcf_block)(struct Qdisc___2 *, long unsigned int, struct netlink_ext_ack *); ++ long unsigned int 
(*bind_tcf)(struct Qdisc___2 *, long unsigned int, u32); ++ void (*unbind_tcf)(struct Qdisc___2 *, long unsigned int); ++ int (*dump)(struct Qdisc___2 *, long unsigned int, struct sk_buff___2 *, struct tcmsg *); ++ int (*dump_stats)(struct Qdisc___2 *, long unsigned int, struct gnet_dump___2 *); ++}; ++ ++struct tcf_chain___2; ++ ++struct tcf_block___2 { ++ struct list_head chain_list; ++ u32 index; ++ unsigned int refcnt; ++ struct net___2 *net; ++ struct Qdisc___2 *q; ++ struct list_head cb_list; ++ struct list_head owner_list; ++ bool keep_dst; ++ unsigned int offloadcnt; ++ unsigned int nooffloaddevcnt; ++ struct { ++ struct tcf_chain___2 *chain; ++ struct list_head filter_chain_list; ++ } chain0; ++}; ++ ++struct tcf_result___2; ++ ++struct tcf_proto_ops___2; ++ ++struct tcf_proto___2 { ++ struct tcf_proto___2 *next; ++ void *root; ++ int (*classify)(struct sk_buff___2 *, const struct tcf_proto___2 *, struct tcf_result___2 *); ++ __be16 protocol; ++ u32 prio; ++ void *data; ++ const struct tcf_proto_ops___2 *ops; ++ struct tcf_chain___2 *chain; ++ struct callback_head rcu; ++}; ++ ++struct tcf_result___2 { ++ union { ++ struct { ++ long unsigned int class; ++ u32 classid; ++ }; ++ const struct tcf_proto___2 *goto_tp; ++ struct { ++ bool ingress; ++ struct gnet_stats_queue *qstats; ++ }; ++ }; ++}; ++ ++struct tcf_proto_ops___2 { ++ struct list_head head; ++ char kind[16]; ++ int (*classify)(struct sk_buff___2 *, const struct tcf_proto___2 *, struct tcf_result___2 *); ++ int (*init)(struct tcf_proto___2 *); ++ void (*destroy)(struct tcf_proto___2 *, struct netlink_ext_ack *); ++ void * (*get)(struct tcf_proto___2 *, u32); ++ int (*change)(struct net___2 *, struct sk_buff___2 *, struct tcf_proto___2 *, long unsigned int, u32, struct nlattr **, void **, bool, struct netlink_ext_ack *); ++ int (*delete)(struct tcf_proto___2 *, void *, bool *, struct netlink_ext_ack *); ++ void (*walk)(struct tcf_proto___2 *, struct tcf_walker *); ++ int (*reoffload)(struct tcf_proto___2 *, bool, tc_setup_cb_t *, void *, struct netlink_ext_ack *); ++ void (*bind_class)(void *, u32, long unsigned int); ++ void * (*tmplt_create)(struct net___2 *, struct tcf_chain___2 *, struct nlattr **, struct netlink_ext_ack *); ++ void (*tmplt_destroy)(void *); ++ int (*dump)(struct net___2 *, struct tcf_proto___2 *, void *, struct sk_buff___2 *, struct tcmsg *); ++ int (*tmplt_dump)(struct sk_buff___2 *, struct net___2 *, void *); ++ struct module___2 *owner; ++}; ++ ++struct tcf_chain___2 { ++ struct tcf_proto___2 *filter_chain; ++ struct list_head list; ++ struct tcf_block___2 *block; ++ u32 index; ++ unsigned int refcnt; ++ unsigned int action_refcnt; ++ bool explicitly_created; ++ const struct tcf_proto_ops___2 *tmplt_ops; ++ void *tmplt_priv; ++}; ++ ++struct bpf_redirect_info___2 { ++ u32 ifindex; ++ u32 flags; ++ struct bpf_map___2 *map; ++ struct bpf_map___2 *map_to_flush; ++ u32 kern_flags; ++}; ++ ++typedef int (*remote_function_f)(void *); ++ ++struct remote_function_call { ++ struct task_struct___2 *p; ++ remote_function_f func; ++ void *info; ++ int ret; ++}; ++ ++typedef void (*event_f)(struct perf_event___2 *, struct perf_cpu_context___2 *, struct perf_event_context___2 *, void *); ++ ++struct event_function_struct { ++ struct perf_event___2 *event; ++ event_f func; ++ void *data; ++}; ++ ++enum event_type_t { ++ EVENT_FLEXIBLE = 1, ++ EVENT_PINNED = 2, ++ EVENT_TIME = 4, ++ EVENT_CPU = 8, ++ EVENT_ALL = 3, ++}; ++ ++struct stop_event_data { ++ struct perf_event___2 *event; ++ unsigned int restart; ++}; ++ 
++struct sched_in_data { ++ struct perf_event_context___2 *ctx; ++ struct perf_cpu_context___2 *cpuctx; ++ int can_add_hw; ++}; ++ ++struct perf_read_data { ++ struct perf_event___2 *event; ++ bool group; ++ int ret; ++}; ++ ++struct perf_read_event { ++ struct perf_event_header header; ++ u32 pid; ++ u32 tid; ++}; ++ ++typedef void perf_iterate_f(struct perf_event___2 *, void *); ++ ++struct remote_output { ++ struct ring_buffer___2 *rb; ++ int err; ++}; ++ ++struct perf_task_event { ++ struct task_struct___2 *task; ++ struct perf_event_context___2 *task_ctx; ++ struct { ++ struct perf_event_header header; ++ u32 pid; ++ u32 ppid; ++ u32 tid; ++ u32 ptid; ++ u64 time; ++ } event_id; ++}; ++ ++struct perf_comm_event { ++ struct task_struct___2 *task; ++ char *comm; ++ int comm_size; ++ struct { ++ struct perf_event_header header; ++ u32 pid; ++ u32 tid; ++ } event_id; ++}; ++ ++struct perf_namespaces_event { ++ struct task_struct___2 *task; ++ struct { ++ struct perf_event_header header; ++ u32 pid; ++ u32 tid; ++ u64 nr_namespaces; ++ struct perf_ns_link_info link_info[7]; ++ } event_id; ++}; ++ ++struct perf_mmap_event { ++ struct vm_area_struct___2 *vma; ++ const char *file_name; ++ int file_size; ++ int maj; ++ int min; ++ u64 ino; ++ u64 ino_generation; ++ u32 prot; ++ u32 flags; ++ struct { ++ struct perf_event_header header; ++ u32 pid; ++ u32 tid; ++ u64 start; ++ u64 len; ++ u64 pgoff; ++ } event_id; ++}; ++ ++struct perf_switch_event { ++ struct task_struct___2 *task; ++ struct task_struct___2 *next_prev; ++ struct { ++ struct perf_event_header header; ++ u32 next_prev_pid; ++ u32 next_prev_tid; ++ } event_id; ++}; ++ ++struct swevent_htable { ++ struct swevent_hlist *swevent_hlist; ++ struct mutex hlist_mutex; ++ int hlist_refcount; ++ int recursion[4]; ++}; ++ ++enum perf_probe_config { ++ PERF_PROBE_CONFIG_IS_RETPROBE = 1, ++}; ++ ++enum { ++ IF_ACT_NONE = 4294967295, ++ IF_ACT_FILTER = 0, ++ IF_ACT_START = 1, ++ IF_ACT_STOP = 2, ++ IF_SRC_FILE = 3, ++ IF_SRC_KERNEL = 4, ++ IF_SRC_FILEADDR = 5, ++ IF_SRC_KERNELADDR = 6, ++}; ++ ++enum { ++ IF_STATE_ACTION = 0, ++ IF_STATE_SOURCE = 1, ++ IF_STATE_END = 2, ++}; ++ ++struct perf_aux_event { ++ struct perf_event_header header; ++ u32 pid; ++ u32 tid; ++}; ++ ++struct perf_aux_event___2 { ++ struct perf_event_header header; ++ u64 offset; ++ u64 size; ++ u64 flags; ++}; ++ ++enum perf_callchain_context { ++ PERF_CONTEXT_HV = 4294967264, ++ PERF_CONTEXT_KERNEL = 4294967168, ++ PERF_CONTEXT_USER = 4294966784, ++ PERF_CONTEXT_GUEST = 4294965248, ++ PERF_CONTEXT_GUEST_KERNEL = 4294965120, ++ PERF_CONTEXT_GUEST_USER = 4294964736, ++ PERF_CONTEXT_MAX = 4294963201, ++}; ++ ++struct callchain_cpus_entries { ++ struct callback_head callback_head; ++ struct perf_callchain_entry *cpu_entries[0]; ++}; ++ ++struct bp_cpuinfo { ++ unsigned int cpu_pinned; ++ unsigned int *tsk_pinned; ++ unsigned int flexible; ++}; ++ ++struct bp_busy_slots { ++ unsigned int pinned; ++ unsigned int flexible; ++}; ++ ++typedef u32 uprobe_opcode_t; ++ ++struct uprobe { ++ struct rb_node rb_node; ++ atomic_t ref; ++ struct rw_semaphore register_rwsem; ++ struct rw_semaphore consumer_rwsem; ++ struct list_head pending_list; ++ struct uprobe_consumer *consumers; ++ struct inode___2 *inode; ++ loff_t offset; ++ long unsigned int flags; ++ struct arch_uprobe arch; ++}; ++ ++struct xol_area { ++ wait_queue_head_t wq; ++ atomic_t slot_count; ++ long unsigned int *bitmap; ++ struct vm_special_mapping xol_mapping; ++ struct page *pages[2]; ++ long unsigned int vaddr; ++}; 
++ ++typedef long unsigned int vm_flags_t; ++ ++typedef int filler_t(void *, struct page *); ++ ++struct page_vma_mapped_walk { ++ struct page *page; ++ struct vm_area_struct *vma; ++ long unsigned int address; ++ pmd_t *pmd; ++ pte_t *pte; ++ spinlock_t *ptl; ++ unsigned int flags; ++}; ++ ++struct map_info { ++ struct map_info *next; ++ struct mm_struct *mm; ++ long unsigned int vaddr; ++}; ++ ++struct parallel_data; ++ ++struct padata_priv { ++ struct list_head list; ++ struct parallel_data *pd; ++ int cb_cpu; ++ int cpu; ++ int info; ++ void (*parallel)(struct padata_priv *); ++ void (*serial)(struct padata_priv *); ++}; ++ ++struct padata_cpumask { ++ cpumask_var_t pcpu; ++ cpumask_var_t cbcpu; ++}; ++ ++struct padata_instance; ++ ++struct padata_parallel_queue; ++ ++struct padata_serial_queue; ++ ++struct parallel_data { ++ struct padata_instance *pinst; ++ struct padata_parallel_queue *pqueue; ++ struct padata_serial_queue *squeue; ++ atomic_t reorder_objects; ++ atomic_t refcnt; ++ atomic_t seq_nr; ++ int cpu; ++ struct padata_cpumask cpumask; ++ struct work_struct reorder_work; ++ long: 64; ++ long: 64; ++ long: 64; ++ spinlock_t lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct padata_list { ++ struct list_head list; ++ spinlock_t lock; ++}; ++ ++struct padata_serial_queue { ++ struct padata_list serial; ++ struct work_struct work; ++ struct parallel_data *pd; ++}; ++ ++struct padata_parallel_queue { ++ struct padata_list parallel; ++ struct padata_list reorder; ++ struct work_struct work; ++ atomic_t num_obj; ++ int cpu_index; ++}; ++ ++struct padata_instance { ++ struct hlist_node node; ++ struct workqueue_struct *wq; ++ struct parallel_data *pd; ++ struct padata_cpumask cpumask; ++ struct blocking_notifier_head___2 cpumask_change_notifier; ++ struct kobject___3 kobj; ++ struct mutex lock; ++ u8 flags; ++}; ++ ++struct padata_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct padata_instance *, struct attribute *, char *); ++ ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); ++}; ++ ++struct static_key_mod { ++ struct static_key_mod *next; ++ struct jump_entry *entries; ++ struct module___2 *mod; ++}; ++ ++struct static_key_deferred { ++ struct static_key key; ++ long unsigned int timeout; ++ struct delayed_work work; ++}; ++ ++struct trace_event_raw_context_tracking_user { ++ struct trace_entry ent; ++ int dummy; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_context_tracking_user {}; ++ ++enum rseq_cpu_id_state { ++ RSEQ_CPU_ID_UNINITIALIZED = 4294967295, ++ RSEQ_CPU_ID_REGISTRATION_FAILED = 4294967294, ++}; ++ ++enum rseq_flags { ++ RSEQ_FLAG_UNREGISTER = 1, ++}; ++ ++enum rseq_cs_flags { ++ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, ++ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, ++ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, ++}; ++ ++struct rseq_cs { ++ __u32 version; ++ __u32 flags; ++ __u64 start_ip; ++ __u64 post_commit_offset; ++ __u64 abort_ip; ++}; ++ ++struct trace_event_raw_rseq_update { ++ struct trace_entry ent; ++ s32 cpu_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rseq_ip_fixup { ++ struct trace_entry ent; ++ long unsigned int regs_ip; ++ long unsigned int start_ip; ++ long unsigned int post_commit_offset; ++ long unsigned int abort_ip; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_rseq_update {}; ++ ++struct trace_event_data_offsets_rseq_ip_fixup {}; ++ ++struct __key_reference_with_attributes; ++ 
++typedef struct __key_reference_with_attributes *key_ref_t; ++ ++typedef int (*request_key_actor_t___2)(struct key___2 *, void *); ++ ++struct pkcs7_message; ++ ++struct radix_tree_iter { ++ long unsigned int index; ++ long unsigned int next_index; ++ long unsigned int tags; ++ struct radix_tree_node *node; ++ unsigned int shift; ++}; ++ ++typedef void (*radix_tree_update_node_t)(struct radix_tree_node *); ++ ++enum { ++ RADIX_TREE_ITER_TAG_MASK = 15, ++ RADIX_TREE_ITER_TAGGED = 16, ++ RADIX_TREE_ITER_CONTIG = 32, ++}; ++ ++enum positive_aop_returns { ++ AOP_WRITEPAGE_ACTIVATE = 524288, ++ AOP_TRUNCATED_PAGE = 524289, ++}; ++ ++enum mapping_flags { ++ AS_EIO = 0, ++ AS_ENOSPC = 1, ++ AS_MM_ALL_LOCKS = 2, ++ AS_UNEVICTABLE = 3, ++ AS_EXITING = 4, ++ AS_NO_WRITEBACK_TAGS = 5, ++ AS_PERCPU_REF = 6, ++}; ++ ++struct pagevec { ++ unsigned char nr; ++ bool percpu_pvec_drained; ++ struct page *pages[15]; ++}; ++ ++struct fid { ++ union { ++ struct { ++ u32 ino; ++ u32 gen; ++ u32 parent_ino; ++ u32 parent_gen; ++ } i32; ++ struct { ++ u32 block; ++ u16 partref; ++ u16 parent_partref; ++ u32 generation; ++ u32 parent_block; ++ u32 parent_generation; ++ } udf; ++ __u32 raw[0]; ++ }; ++}; ++ ++typedef void (*poll_queue_proc___3)(struct file___2 *, wait_queue_head_t *, struct poll_table_struct *); ++ ++struct trace_event_raw_mm_filemap_op_page_cache { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ long unsigned int i_ino; ++ long unsigned int index; ++ dev_t s_dev; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_filemap_set_wb_err { ++ struct trace_entry ent; ++ long unsigned int i_ino; ++ dev_t s_dev; ++ errseq_t errseq; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_file_check_and_advance_wb_err { ++ struct trace_entry ent; ++ struct file___2 *file; ++ long unsigned int i_ino; ++ dev_t s_dev; ++ errseq_t old; ++ errseq_t new; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_filemap_op_page_cache {}; ++ ++struct trace_event_data_offsets_filemap_set_wb_err {}; ++ ++struct trace_event_data_offsets_file_check_and_advance_wb_err {}; ++ ++struct percpu_page { ++ struct percpu_ref ref; ++ struct page *page; ++}; ++ ++struct wait_page_key { ++ struct page *page; ++ int bit_nr; ++ int page_match; ++}; ++ ++struct wait_page_queue { ++ struct page *page; ++ int bit_nr; ++ wait_queue_entry_t wait; ++}; ++ ++struct kmem_cache_order_objects { ++ unsigned int x; ++}; ++ ++struct memcg_cache_array; ++ ++struct memcg_cache_params { ++ struct kmem_cache *root_cache; ++ union { ++ struct { ++ struct memcg_cache_array *memcg_caches; ++ struct list_head __root_caches_node; ++ struct list_head children; ++ bool dying; ++ }; ++ struct { ++ struct mem_cgroup *memcg; ++ struct list_head children_node; ++ struct list_head kmem_caches_node; ++ void (*deact_fn)(struct kmem_cache *); ++ union { ++ struct callback_head deact_rcu_head; ++ struct work_struct deact_work; ++ }; ++ }; ++ }; ++}; ++ ++struct kmem_cache_cpu; ++ ++struct kmem_cache_node; ++ ++struct kmem_cache { ++ struct kmem_cache_cpu *cpu_slab; ++ slab_flags_t flags; ++ long unsigned int min_partial; ++ unsigned int size; ++ unsigned int object_size; ++ unsigned int offset; ++ unsigned int cpu_partial; ++ struct kmem_cache_order_objects oo; ++ struct kmem_cache_order_objects max; ++ struct kmem_cache_order_objects min; ++ gfp_t allocflags; ++ int refcount; ++ void (*ctor)(void *); ++ unsigned int inuse; ++ unsigned int align; ++ unsigned int red_left_pad; ++ const char *name; ++ struct list_head list; ++ struct kobject kobj; ++ 
struct work_struct kobj_remove_work; ++ struct memcg_cache_params memcg_params; ++ unsigned int max_attr_size; ++ struct kset *memcg_kset; ++ unsigned int remote_node_defrag_ratio; ++ unsigned int *random_seq; ++ unsigned int useroffset; ++ unsigned int usersize; ++ struct kmem_cache_node *node[16]; ++}; ++ ++struct memcg_cache_array { ++ struct callback_head rcu; ++ struct kmem_cache *entries[0]; ++}; ++ ++struct kmem_cache_cpu { ++ void **freelist; ++ long unsigned int tid; ++ struct page *page; ++ struct page *partial; ++}; ++ ++struct kmem_cache_node { ++ spinlock_t list_lock; ++ long unsigned int nr_partial; ++ struct list_head partial; ++ atomic_long_t nr_slabs; ++ atomic_long_t total_objects; ++ struct list_head full; ++}; ++ ++enum slab_state { ++ DOWN = 0, ++ PARTIAL = 1, ++ PARTIAL_NODE = 2, ++ UP = 3, ++ FULL = 4, ++}; ++ ++struct kmalloc_info_struct { ++ const char *name; ++ unsigned int size; ++}; ++ ++struct oom_control { ++ struct zonelist *zonelist; ++ nodemask_t *nodemask; ++ struct mem_cgroup *memcg; ++ const gfp_t gfp_mask; ++ const int order; ++ long unsigned int totalpages; ++ struct task_struct *chosen; ++ long unsigned int chosen_points; ++}; ++ ++struct mmu_table_batch { ++ struct callback_head rcu; ++ unsigned int nr; ++ void *tables[0]; ++}; ++ ++struct mmu_gather_batch { ++ struct mmu_gather_batch *next; ++ unsigned int nr; ++ unsigned int max; ++ struct page *pages[0]; ++}; ++ ++struct mmu_gather { ++ struct mm_struct *mm; ++ struct mmu_table_batch *batch; ++ long unsigned int start; ++ long unsigned int end; ++ unsigned int fullmm: 1; ++ unsigned int need_flush_all: 1; ++ unsigned int freed_tables: 1; ++ unsigned int cleared_ptes: 1; ++ unsigned int cleared_pmds: 1; ++ unsigned int cleared_puds: 1; ++ unsigned int cleared_p4ds: 1; ++ struct mmu_gather_batch *active; ++ struct mmu_gather_batch local; ++ struct page *__pages[8]; ++ unsigned int batch_count; ++ int page_size; ++}; ++ ++enum compact_priority { ++ COMPACT_PRIO_SYNC_FULL = 0, ++ MIN_COMPACT_PRIORITY = 0, ++ COMPACT_PRIO_SYNC_LIGHT = 1, ++ MIN_COMPACT_COSTLY_PRIORITY = 1, ++ DEF_COMPACT_PRIORITY = 1, ++ COMPACT_PRIO_ASYNC = 2, ++ INIT_COMPACT_PRIORITY = 2, ++}; ++ ++enum compact_result { ++ COMPACT_NOT_SUITABLE_ZONE = 0, ++ COMPACT_SKIPPED = 1, ++ COMPACT_DEFERRED = 2, ++ COMPACT_INACTIVE = 2, ++ COMPACT_NO_SUITABLE_PAGE = 3, ++ COMPACT_CONTINUE = 4, ++ COMPACT_COMPLETE = 5, ++ COMPACT_PARTIAL_SKIPPED = 6, ++ COMPACT_CONTENDED = 7, ++ COMPACT_SUCCESS = 8, ++}; ++ ++struct trace_event_raw_oom_score_adj_update { ++ struct trace_entry ent; ++ pid_t pid; ++ char comm[16]; ++ short int oom_score_adj; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_reclaim_retry_zone { ++ struct trace_entry ent; ++ int node; ++ int zone_idx; ++ int order; ++ long unsigned int reclaimable; ++ long unsigned int available; ++ long unsigned int min_wmark; ++ int no_progress_loops; ++ bool wmark_check; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mark_victim { ++ struct trace_entry ent; ++ int pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wake_reaper { ++ struct trace_entry ent; ++ int pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_start_task_reaping { ++ struct trace_entry ent; ++ int pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_finish_task_reaping { ++ struct trace_entry ent; ++ int pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_skip_task_reaping { ++ struct trace_entry ent; ++ int pid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_compact_retry { ++ struct 
trace_entry ent; ++ int order; ++ int priority; ++ int result; ++ int retries; ++ int max_retries; ++ bool ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_oom_score_adj_update {}; ++ ++struct trace_event_data_offsets_reclaim_retry_zone {}; ++ ++struct trace_event_data_offsets_mark_victim {}; ++ ++struct trace_event_data_offsets_wake_reaper {}; ++ ++struct trace_event_data_offsets_start_task_reaping {}; ++ ++struct trace_event_data_offsets_finish_task_reaping {}; ++ ++struct trace_event_data_offsets_skip_task_reaping {}; ++ ++struct trace_event_data_offsets_compact_retry {}; ++ ++enum oom_constraint { ++ CONSTRAINT_NONE = 0, ++ CONSTRAINT_CPUSET = 1, ++ CONSTRAINT_MEMORY_POLICY = 2, ++ CONSTRAINT_MEMCG = 3, ++}; ++ ++enum wb_congested_state { ++ WB_async_congested = 0, ++ WB_sync_congested = 1, ++}; ++ ++enum pageblock_bits { ++ PB_migrate = 0, ++ PB_migrate_end = 2, ++ PB_migrate_skip = 3, ++ NR_PAGEBLOCK_BITS = 4, ++}; ++ ++struct mminit_pfnnid_cache { ++ long unsigned int last_start; ++ long unsigned int last_end; ++ int last_nid; ++}; ++ ++struct page_frag_cache { ++ void *va; ++ __u32 offset; ++ unsigned int pagecnt_bias; ++ bool pfmemalloc; ++}; ++ ++enum { ++ BLK_RW_ASYNC = 0, ++ BLK_RW_SYNC = 1, ++}; ++ ++struct alloc_context { ++ struct zonelist *zonelist; ++ nodemask_t *nodemask; ++ struct zoneref *preferred_zoneref; ++ int migratetype; ++ enum zone_type high_zoneidx; ++ bool spread_dirty_pages; ++}; ++ ++struct compact_control { ++ struct list_head freepages; ++ struct list_head migratepages; ++ struct zone *zone; ++ long unsigned int nr_freepages; ++ long unsigned int nr_migratepages; ++ long unsigned int total_migrate_scanned; ++ long unsigned int total_free_scanned; ++ long unsigned int free_pfn; ++ long unsigned int migrate_pfn; ++ long unsigned int last_migrated_pfn; ++ const gfp_t gfp_mask; ++ int order; ++ int migratetype; ++ const unsigned int alloc_flags; ++ const int classzone_idx; ++ enum migrate_mode mode; ++ bool ignore_skip_hint; ++ bool no_set_skip_hint; ++ bool ignore_block_suitable; ++ bool direct_compaction; ++ bool whole_zone; ++ bool contended; ++ bool finishing_block; ++}; ++ ++enum mminit_level { ++ MMINIT_WARNING = 0, ++ MMINIT_VERIFY = 1, ++ MMINIT_TRACE = 2, ++}; ++ ++enum wb_state { ++ WB_registered = 0, ++ WB_writeback_running = 1, ++ WB_has_dirty_io = 2, ++ WB_start_all = 3, ++}; ++ ++struct wb_lock_cookie { ++ bool locked; ++ long unsigned int flags; ++}; ++ ++typedef int (*writepage_t)(struct page *, struct writeback_control *, void *); ++ ++struct dirty_throttle_control { ++ struct wb_domain *dom; ++ struct dirty_throttle_control *gdtc; ++ struct bdi_writeback *wb; ++ struct fprop_local_percpu *wb_completions; ++ long unsigned int avail; ++ long unsigned int dirty; ++ long unsigned int thresh; ++ long unsigned int bg_thresh; ++ long unsigned int wb_dirty; ++ long unsigned int wb_thresh; ++ long unsigned int wb_bg_thresh; ++ long unsigned int pos_ratio; ++}; ++ ++struct trace_event_raw_mm_lru_insertion { ++ struct trace_entry ent; ++ struct page *page; ++ long unsigned int pfn; ++ int lru; ++ long unsigned int flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_lru_activate { ++ struct trace_entry ent; ++ struct page *page; ++ long unsigned int pfn; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_lru_insertion {}; ++ ++struct trace_event_data_offsets_mm_lru_activate {}; ++ ++enum pgdat_flags { ++ PGDAT_CONGESTED = 0, ++ PGDAT_DIRTY = 1, ++ PGDAT_WRITEBACK = 2, ++ PGDAT_RECLAIM_LOCKED = 3, ++}; ++ ++struct 
reclaim_stat { ++ unsigned int nr_dirty; ++ unsigned int nr_unqueued_dirty; ++ unsigned int nr_congested; ++ unsigned int nr_writeback; ++ unsigned int nr_immediate; ++ unsigned int nr_activate; ++ unsigned int nr_ref_keep; ++ unsigned int nr_unmap_fail; ++}; ++ ++enum mem_cgroup_protection { ++ MEMCG_PROT_NONE = 0, ++ MEMCG_PROT_LOW = 1, ++ MEMCG_PROT_MIN = 2, ++}; ++ ++struct mem_cgroup_reclaim_cookie { ++ pg_data_t *pgdat; ++ int priority; ++ unsigned int generation; ++}; ++ ++enum ttu_flags { ++ TTU_MIGRATION = 1, ++ TTU_MUNLOCK = 2, ++ TTU_SPLIT_HUGE_PMD = 4, ++ TTU_IGNORE_MLOCK = 8, ++ TTU_IGNORE_ACCESS = 16, ++ TTU_IGNORE_HWPOISON = 32, ++ TTU_BATCH_FLUSH = 64, ++ TTU_RMAP_LOCKED = 128, ++ TTU_SPLIT_FREEZE = 256, ++}; ++ ++struct trace_event_raw_mm_vmscan_kswapd_sleep { ++ struct trace_entry ent; ++ int nid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_kswapd_wake { ++ struct trace_entry ent; ++ int nid; ++ int zid; ++ int order; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_wakeup_kswapd { ++ struct trace_entry ent; ++ int nid; ++ int zid; ++ int order; ++ gfp_t gfp_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { ++ struct trace_entry ent; ++ int order; ++ int may_writepage; ++ gfp_t gfp_flags; ++ int classzone_idx; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { ++ struct trace_entry ent; ++ long unsigned int nr_reclaimed; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_shrink_slab_start { ++ struct trace_entry ent; ++ struct shrinker *shr; ++ void *shrink; ++ int nid; ++ long int nr_objects_to_shrink; ++ gfp_t gfp_flags; ++ long unsigned int cache_items; ++ long long unsigned int delta; ++ long unsigned int total_scan; ++ int priority; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_shrink_slab_end { ++ struct trace_entry ent; ++ struct shrinker *shr; ++ int nid; ++ void *shrink; ++ long int unused_scan; ++ long int new_scan; ++ int retval; ++ long int total_scan; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_lru_isolate { ++ struct trace_entry ent; ++ int classzone_idx; ++ int order; ++ long unsigned int nr_requested; ++ long unsigned int nr_scanned; ++ long unsigned int nr_skipped; ++ long unsigned int nr_taken; ++ isolate_mode_t isolate_mode; ++ int lru; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_writepage { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ int reclaim_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_lru_shrink_inactive { ++ struct trace_entry ent; ++ int nid; ++ long unsigned int nr_scanned; ++ long unsigned int nr_reclaimed; ++ long unsigned int nr_dirty; ++ long unsigned int nr_writeback; ++ long unsigned int nr_congested; ++ long unsigned int nr_immediate; ++ long unsigned int nr_activate; ++ long unsigned int nr_ref_keep; ++ long unsigned int nr_unmap_fail; ++ int priority; ++ int reclaim_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_lru_shrink_active { ++ struct trace_entry ent; ++ int nid; ++ long unsigned int nr_taken; ++ long unsigned int nr_active; ++ long unsigned int nr_deactivated; ++ long unsigned int nr_referenced; ++ int priority; ++ int reclaim_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_vmscan_inactive_list_is_low { ++ struct trace_entry ent; ++ int nid; ++ int reclaim_idx; ++ long unsigned int total_inactive; ++ long unsigned int inactive; ++ long unsigned int total_active; ++ long unsigned int 
active; ++ long unsigned int ratio; ++ int reclaim_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; ++ ++struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; ++ ++struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; ++ ++struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; ++ ++struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; ++ ++struct trace_event_data_offsets_mm_shrink_slab_start {}; ++ ++struct trace_event_data_offsets_mm_shrink_slab_end {}; ++ ++struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; ++ ++struct trace_event_data_offsets_mm_vmscan_writepage {}; ++ ++struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; ++ ++struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; ++ ++struct trace_event_data_offsets_mm_vmscan_inactive_list_is_low {}; ++ ++struct scan_control { ++ long unsigned int nr_to_reclaim; ++ nodemask_t *nodemask; ++ struct mem_cgroup *target_mem_cgroup; ++ unsigned int may_writepage: 1; ++ unsigned int may_unmap: 1; ++ unsigned int may_swap: 1; ++ unsigned int memcg_low_reclaim: 1; ++ unsigned int memcg_low_skipped: 1; ++ unsigned int hibernation_mode: 1; ++ unsigned int compaction_ready: 1; ++ s8 order; ++ s8 priority; ++ s8 reclaim_idx; ++ gfp_t gfp_mask; ++ long unsigned int nr_scanned; ++ long unsigned int nr_reclaimed; ++ struct { ++ unsigned int dirty; ++ unsigned int unqueued_dirty; ++ unsigned int congested; ++ unsigned int writeback; ++ unsigned int immediate; ++ unsigned int file_taken; ++ unsigned int taken; ++ } nr; ++}; ++ ++typedef enum { ++ PAGE_KEEP = 0, ++ PAGE_ACTIVATE = 1, ++ PAGE_SUCCESS = 2, ++ PAGE_CLEAN = 3, ++} pageout_t; ++ ++enum page_references { ++ PAGEREF_RECLAIM = 0, ++ PAGEREF_RECLAIM_CLEAN = 1, ++ PAGEREF_KEEP = 2, ++ PAGEREF_ACTIVATE = 3, ++}; ++ ++enum scan_balance { ++ SCAN_EQUAL = 0, ++ SCAN_FRACT = 1, ++ SCAN_ANON = 2, ++ SCAN_FILE = 3, ++}; ++ ++enum transparent_hugepage_flag { ++ TRANSPARENT_HUGEPAGE_FLAG = 0, ++ TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 1, ++ TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 2, ++ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 3, ++ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 4, ++ TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 5, ++ TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 6, ++ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 7, ++}; ++ ++enum { ++ MPOL_DEFAULT = 0, ++ MPOL_PREFERRED = 1, ++ MPOL_BIND = 2, ++ MPOL_INTERLEAVE = 3, ++ MPOL_LOCAL = 4, ++ MPOL_MAX = 5, ++}; ++ ++struct shared_policy { ++ struct rb_root root; ++ rwlock_t lock; ++}; ++ ++struct xattr { ++ const char *name; ++ void *value; ++ size_t value_len; ++}; ++ ++struct simple_xattrs { ++ struct list_head head; ++ spinlock_t lock; ++}; ++ ++struct simple_xattr { ++ struct list_head list; ++ char *name; ++ size_t size; ++ char value[0]; ++}; ++ ++enum fid_type { ++ FILEID_ROOT = 0, ++ FILEID_INO32_GEN = 1, ++ FILEID_INO32_GEN_PARENT = 2, ++ FILEID_BTRFS_WITHOUT_PARENT = 77, ++ FILEID_BTRFS_WITH_PARENT = 78, ++ FILEID_BTRFS_WITH_PARENT_ROOT = 79, ++ FILEID_UDF_WITHOUT_PARENT = 81, ++ FILEID_UDF_WITH_PARENT = 82, ++ FILEID_NILFS_WITHOUT_PARENT = 97, ++ FILEID_NILFS_WITH_PARENT = 98, ++ FILEID_FAT_WITHOUT_PARENT = 113, ++ FILEID_FAT_WITH_PARENT = 114, ++ FILEID_LUSTRE = 151, ++ FILEID_INVALID = 255, ++}; ++ ++struct shmem_inode_info { ++ spinlock_t lock; ++ unsigned int seals; ++ long unsigned int flags; ++ long unsigned int alloced; ++ long unsigned int swapped; ++ struct list_head shrinklist; ++ struct list_head 
swaplist; ++ struct shared_policy policy; ++ struct simple_xattrs xattrs; ++ struct inode___2 vfs_inode; ++}; ++ ++struct shmem_sb_info { ++ long unsigned int max_blocks; ++ struct percpu_counter used_blocks; ++ long unsigned int max_inodes; ++ long unsigned int free_inodes; ++ spinlock_t stat_lock; ++ umode_t mode; ++ unsigned char huge; ++ kuid_t uid; ++ kgid_t gid; ++ struct mempolicy *mpol; ++ spinlock_t shrinklist_lock; ++ struct list_head shrinklist; ++ long unsigned int shrinklist_len; ++}; ++ ++enum sgp_type { ++ SGP_READ = 0, ++ SGP_CACHE = 1, ++ SGP_NOHUGE = 2, ++ SGP_HUGE = 3, ++ SGP_WRITE = 4, ++ SGP_FALLOC = 5, ++}; ++ ++struct shmem_falloc { ++ wait_queue_head_t *waitq; ++ long unsigned int start; ++ long unsigned int next; ++ long unsigned int nr_falloced; ++ long unsigned int nr_unswapped; ++}; ++ ++struct contig_page_info { ++ long unsigned int free_pages; ++ long unsigned int free_blocks_total; ++ long unsigned int free_blocks_suitable; ++}; ++ ++enum writeback_stat_item { ++ NR_DIRTY_THRESHOLD = 0, ++ NR_DIRTY_BG_THRESHOLD = 1, ++ NR_VM_WRITEBACK_STAT_ITEMS = 2, ++}; ++ ++struct pcpu_group_info { ++ int nr_units; ++ long unsigned int base_offset; ++ unsigned int *cpu_map; ++}; ++ ++struct pcpu_alloc_info { ++ size_t static_size; ++ size_t reserved_size; ++ size_t dyn_size; ++ size_t unit_size; ++ size_t atom_size; ++ size_t alloc_size; ++ size_t __ai_size; ++ int nr_groups; ++ struct pcpu_group_info groups[0]; ++}; ++ ++typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int, size_t, size_t); ++ ++typedef void (*pcpu_fc_free_fn_t)(void *, size_t); ++ ++typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); ++ ++struct trace_event_raw_percpu_alloc_percpu { ++ struct trace_entry ent; ++ bool reserved; ++ bool is_atomic; ++ size_t size; ++ size_t align; ++ void *base_addr; ++ int off; ++ void *ptr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_percpu_free_percpu { ++ struct trace_entry ent; ++ void *base_addr; ++ int off; ++ void *ptr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_percpu_alloc_percpu_fail { ++ struct trace_entry ent; ++ bool reserved; ++ bool is_atomic; ++ size_t size; ++ size_t align; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_percpu_create_chunk { ++ struct trace_entry ent; ++ void *base_addr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_percpu_destroy_chunk { ++ struct trace_entry ent; ++ void *base_addr; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_percpu_alloc_percpu {}; ++ ++struct trace_event_data_offsets_percpu_free_percpu {}; ++ ++struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; ++ ++struct trace_event_data_offsets_percpu_create_chunk {}; ++ ++struct trace_event_data_offsets_percpu_destroy_chunk {}; ++ ++struct pcpu_block_md { ++ int contig_hint; ++ int contig_hint_start; ++ int left_free; ++ int right_free; ++ int first_free; ++}; ++ ++struct pcpu_chunk { ++ struct list_head list; ++ int free_bytes; ++ int contig_bits; ++ int contig_bits_start; ++ void *base_addr; ++ long unsigned int *alloc_map; ++ long unsigned int *bound_map; ++ struct pcpu_block_md *md_blocks; ++ void *data; ++ int first_bit; ++ bool immutable; ++ int start_offset; ++ int end_offset; ++ int nr_pages; ++ int nr_populated; ++ int nr_empty_pop_pages; ++ long unsigned int populated[0]; ++}; ++ ++struct trace_event_raw_kmem_alloc { ++ struct trace_entry ent; ++ long unsigned int call_site; ++ const void *ptr; ++ size_t bytes_req; ++ size_t bytes_alloc; ++ gfp_t gfp_flags; ++ char __data[0]; ++}; ++ ++struct 
trace_event_raw_kmem_alloc_node { ++ struct trace_entry ent; ++ long unsigned int call_site; ++ const void *ptr; ++ size_t bytes_req; ++ size_t bytes_alloc; ++ gfp_t gfp_flags; ++ int node; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kmem_free { ++ struct trace_entry ent; ++ long unsigned int call_site; ++ const void *ptr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page_free { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ unsigned int order; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page_free_batched { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page_alloc { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ unsigned int order; ++ gfp_t gfp_flags; ++ int migratetype; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ unsigned int order; ++ int migratetype; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page_pcpu_drain { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ unsigned int order; ++ int migratetype; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_page_alloc_extfrag { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ int alloc_order; ++ int fallback_order; ++ int alloc_migratetype; ++ int fallback_migratetype; ++ int change_ownership; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_kmem_alloc {}; ++ ++struct trace_event_data_offsets_kmem_alloc_node {}; ++ ++struct trace_event_data_offsets_kmem_free {}; ++ ++struct trace_event_data_offsets_mm_page_free {}; ++ ++struct trace_event_data_offsets_mm_page_free_batched {}; ++ ++struct trace_event_data_offsets_mm_page_alloc {}; ++ ++struct trace_event_data_offsets_mm_page {}; ++ ++struct trace_event_data_offsets_mm_page_pcpu_drain {}; ++ ++struct trace_event_data_offsets_mm_page_alloc_extfrag {}; ++ ++struct slabinfo { ++ long unsigned int active_objs; ++ long unsigned int num_objs; ++ long unsigned int active_slabs; ++ long unsigned int num_slabs; ++ long unsigned int shared_avail; ++ unsigned int limit; ++ unsigned int batchcount; ++ unsigned int shared; ++ unsigned int objects_per_slab; ++ unsigned int cache_order; ++}; ++ ++struct trace_event_raw_mm_compaction_isolate_template { ++ struct trace_entry ent; ++ long unsigned int start_pfn; ++ long unsigned int end_pfn; ++ long unsigned int nr_scanned; ++ long unsigned int nr_taken; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_migratepages { ++ struct trace_entry ent; ++ long unsigned int nr_migrated; ++ long unsigned int nr_failed; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_begin { ++ struct trace_entry ent; ++ long unsigned int zone_start; ++ long unsigned int migrate_pfn; ++ long unsigned int free_pfn; ++ long unsigned int zone_end; ++ bool sync; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_end { ++ struct trace_entry ent; ++ long unsigned int zone_start; ++ long unsigned int migrate_pfn; ++ long unsigned int free_pfn; ++ long unsigned int zone_end; ++ bool sync; ++ int status; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_try_to_compact_pages { ++ struct trace_entry ent; ++ int order; ++ gfp_t gfp_mask; ++ int prio; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_suitable_template { ++ struct trace_entry ent; ++ int nid; ++ enum zone_type idx; ++ int order; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_defer_template { ++ struct 
trace_entry ent; ++ int nid; ++ enum zone_type idx; ++ int order; ++ unsigned int considered; ++ unsigned int defer_shift; ++ int order_failed; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_compaction_kcompactd_sleep { ++ struct trace_entry ent; ++ int nid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_kcompactd_wake_template { ++ struct trace_entry ent; ++ int nid; ++ int order; ++ enum zone_type classzone_idx; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_compaction_isolate_template {}; ++ ++struct trace_event_data_offsets_mm_compaction_migratepages {}; ++ ++struct trace_event_data_offsets_mm_compaction_begin {}; ++ ++struct trace_event_data_offsets_mm_compaction_end {}; ++ ++struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; ++ ++struct trace_event_data_offsets_mm_compaction_suitable_template {}; ++ ++struct trace_event_data_offsets_mm_compaction_defer_template {}; ++ ++struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; ++ ++struct trace_event_data_offsets_kcompactd_wake_template {}; ++ ++typedef enum { ++ ISOLATE_ABORT = 0, ++ ISOLATE_NONE = 1, ++ ISOLATE_SUCCESS = 2, ++} isolate_migrate_t; ++ ++struct anon_vma_chain { ++ struct vm_area_struct *vma; ++ struct anon_vma *anon_vma; ++ struct list_head same_vma; ++ struct rb_node rb; ++ long unsigned int rb_subtree_last; ++}; ++ ++struct rb_augment_callbacks { ++ void (*propagate)(struct rb_node *, struct rb_node *); ++ void (*copy)(struct rb_node *, struct rb_node *); ++ void (*rotate)(struct rb_node *, struct rb_node *); ++}; ++ ++enum lru_status { ++ LRU_REMOVED = 0, ++ LRU_REMOVED_RETRY = 1, ++ LRU_ROTATE = 2, ++ LRU_SKIP = 3, ++ LRU_RETRY = 4, ++}; ++ ++typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *); ++ ++typedef struct { ++ long unsigned int pd; ++} hugepd_t; ++ ++typedef struct { ++ u64 val; ++} pfn_t; ++ ++struct zap_details { ++ struct address_space *check_mapping; ++ long unsigned int first_index; ++ long unsigned int last_index; ++}; ++ ++typedef int (*pte_fn_t)(pte_t *, pgtable_t, long unsigned int, void *); ++ ++enum { ++ SWP_USED = 1, ++ SWP_WRITEOK = 2, ++ SWP_DISCARDABLE = 4, ++ SWP_DISCARDING = 8, ++ SWP_SOLIDSTATE = 16, ++ SWP_CONTINUED = 32, ++ SWP_BLKDEV = 64, ++ SWP_ACTIVATED = 128, ++ SWP_FS = 256, ++ SWP_AREA_DISCARD = 512, ++ SWP_PAGE_DISCARD = 1024, ++ SWP_STABLE_WRITES = 2048, ++ SWP_SYNCHRONOUS_IO = 4096, ++ SWP_VALID = 8192, ++ SWP_SCANNING = 16384, ++}; ++ ++struct ktask_node { ++ void *kn_start; ++ size_t kn_task_size; ++ int kn_nid; ++ void *kn_position; ++ size_t kn_remaining_size; ++ struct list_head kn_failed_works; ++}; ++ ++typedef int (*ktask_thread_func)(void *, void *, void *); ++ ++typedef void (*ktask_undo_func)(void *, void *, void *); ++ ++typedef void * (*ktask_iter_func)(void *, size_t); ++ ++struct ktask_ctl { ++ ktask_thread_func kc_thread_func; ++ ktask_undo_func kc_undo_func; ++ void *kc_func_arg; ++ size_t kc_min_chunk_size; ++ ktask_iter_func kc_iter_func; ++ size_t kc_max_threads; ++}; ++ ++struct cgp_args { ++ struct page *base_page; ++ long unsigned int addr; ++}; ++ ++struct copy_subpage_arg { ++ struct page *dst; ++ struct page *src; ++ struct vm_area_struct *vma; ++}; ++ ++struct mm_walk { ++ int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); ++ int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); ++ int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); ++ int 
(*pte_hole)(long unsigned int, long unsigned int, struct mm_walk *); ++ int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); ++ int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); ++ struct mm_struct *mm; ++ struct vm_area_struct *vma; ++ void *private; ++}; ++ ++struct vm_unmapped_area_info { ++ long unsigned int flags; ++ long unsigned int length; ++ long unsigned int low_limit; ++ long unsigned int high_limit; ++ long unsigned int align_mask; ++ long unsigned int align_offset; ++}; ++ ++enum { ++ HUGETLB_SHMFS_INODE = 1, ++ HUGETLB_ANONHUGE_INODE = 2, ++}; ++ ++struct attribute_group___3; ++ ++struct rmap_walk_control { ++ void *arg; ++ bool (*rmap_one)(struct page *, struct vm_area_struct *, long unsigned int, void *); ++ int (*done)(struct page *); ++ struct anon_vma * (*anon_lock)(struct page *); ++ bool (*invalid_vma)(struct vm_area_struct *, void *); ++}; ++ ++struct page_referenced_arg { ++ int mapcount; ++ int referenced; ++ long unsigned int vm_flags; ++ struct mem_cgroup *memcg; ++}; ++ ++struct vmap_area { ++ long unsigned int va_start; ++ long unsigned int va_end; ++ long unsigned int flags; ++ struct rb_node rb_node; ++ struct list_head list; ++ struct llist_node purge_list; ++ struct vm_struct___2 *vm; ++ struct callback_head callback_head; ++}; ++ ++struct vfree_deferred { ++ struct llist_head list; ++ struct work_struct wq; ++}; ++ ++struct vmap_block_queue { ++ spinlock_t lock; ++ struct list_head free; ++}; ++ ++struct vmap_block { ++ spinlock_t lock; ++ struct vmap_area *va; ++ long unsigned int free; ++ long unsigned int dirty; ++ long unsigned int dirty_min; ++ long unsigned int dirty_max; ++ struct list_head free_list; ++ struct callback_head callback_head; ++ struct list_head purge; ++}; ++ ++struct vma_swap_readahead { ++ short unsigned int win; ++ short unsigned int offset; ++ short unsigned int nr_pte; ++ pte_t *ptes; ++}; ++ ++union swap_header { ++ struct { ++ char reserved[65526]; ++ char magic[10]; ++ } magic; ++ struct { ++ char bootbits[1024]; ++ __u32 version; ++ __u32 last_page; ++ __u32 nr_badpages; ++ unsigned char sws_uuid[16]; ++ unsigned char sws_volume[16]; ++ __u32 padding[117]; ++ __u32 badpages[1]; ++ } info; ++}; ++ ++struct swap_slots_cache { ++ bool lock_initialized; ++ struct mutex alloc_lock; ++ swp_entry_t *slots; ++ int nr; ++ int cur; ++ spinlock_t free_lock; ++ swp_entry_t *slots_ret; ++ int n_ret; ++}; ++ ++struct frontswap_ops { ++ void (*init)(unsigned int); ++ int (*store)(unsigned int, long unsigned int, struct page *); ++ int (*load)(unsigned int, long unsigned int, struct page *); ++ void (*invalidate_page)(unsigned int, long unsigned int); ++ void (*invalidate_area)(unsigned int); ++ struct frontswap_ops *next; ++}; ++ ++struct crypto_comp { ++ struct crypto_tfm base; ++}; ++ ++struct zpool; ++ ++struct zpool_ops { ++ int (*evict)(struct zpool *, long unsigned int); ++}; ++ ++enum zpool_mapmode { ++ ZPOOL_MM_RW = 0, ++ ZPOOL_MM_RO = 1, ++ ZPOOL_MM_WO = 2, ++ ZPOOL_MM_DEFAULT = 0, ++}; ++ ++struct zswap_pool { ++ struct zpool *zpool; ++ struct crypto_comp **tfm; ++ struct kref kref; ++ struct list_head list; ++ struct work_struct work; ++ struct hlist_node node; ++ char tfm_name[128]; ++}; ++ ++struct zswap_entry { ++ struct rb_node rbnode; ++ long unsigned int offset; ++ int refcount; ++ unsigned int length; ++ struct zswap_pool *pool; ++ union { ++ long unsigned int handle; ++ long unsigned int value; ++ }; ++}; ++ ++struct zswap_header { ++ swp_entry_t 
swpentry; ++}; ++ ++struct zswap_tree { ++ struct rb_root rbroot; ++ spinlock_t lock; ++}; ++ ++enum zswap_get_swap_ret { ++ ZSWAP_SWAPCACHE_NEW = 0, ++ ZSWAP_SWAPCACHE_EXIST = 1, ++ ZSWAP_SWAPCACHE_FAIL = 2, ++}; ++ ++typedef void (*dr_release_t___2)(struct device___2 *, void *); ++ ++struct dma_pool { ++ struct list_head page_list; ++ spinlock_t lock; ++ size_t size; ++ struct device___2 *dev; ++ size_t allocation; ++ size_t boundary; ++ char name[32]; ++ struct list_head pools; ++}; ++ ++struct dma_page { ++ struct list_head page_list; ++ void *vaddr; ++ dma_addr_t dma; ++ unsigned int in_use; ++ unsigned int offset; ++}; ++ ++enum string_size_units { ++ STRING_UNITS_10 = 0, ++ STRING_UNITS_2 = 1, ++}; ++ ++struct resv_map { ++ struct kref refs; ++ spinlock_t lock; ++ struct list_head regions; ++ long int adds_in_progress; ++ struct list_head region_cache; ++ long int region_cache_count; ++}; ++ ++struct huge_bootmem_page { ++ struct list_head list; ++ struct hstate *hstate; ++}; ++ ++struct file_region { ++ struct list_head link; ++ long int from; ++ long int to; ++}; ++ ++enum vma_resv_mode { ++ VMA_NEEDS_RESV = 0, ++ VMA_COMMIT_RESV = 1, ++ VMA_END_RESV = 2, ++ VMA_ADD_RESV = 3, ++}; ++ ++struct node_hstate { ++ struct kobject *hugepages_kobj; ++ struct kobject *hstate_kobjs[4]; ++}; ++ ++struct hugetlb_cgroup; ++ ++struct nodemask_scratch { ++ nodemask_t mask1; ++ nodemask_t mask2; ++}; ++ ++struct sp_node { ++ struct rb_node nd; ++ long unsigned int start; ++ long unsigned int end; ++ struct mempolicy *policy; ++}; ++ ++struct mempolicy_operations { ++ int (*create)(struct mempolicy *, const nodemask_t *); ++ void (*rebind)(struct mempolicy *, const nodemask_t *); ++}; ++ ++struct queue_pages { ++ struct list_head *pagelist; ++ long unsigned int flags; ++ nodemask_t *nmask; ++ struct vm_area_struct *prev; ++}; ++ ++struct rmap_item; ++ ++struct mm_slot { ++ struct hlist_node link; ++ struct list_head mm_list; ++ struct rmap_item *rmap_list; ++ struct mm_struct *mm; ++}; ++ ++struct stable_node; ++ ++struct rmap_item { ++ struct rmap_item *rmap_list; ++ union { ++ struct anon_vma *anon_vma; ++ int nid; ++ }; ++ struct mm_struct *mm; ++ long unsigned int address; ++ unsigned int oldchecksum; ++ union { ++ struct rb_node node; ++ struct { ++ struct stable_node *head; ++ struct hlist_node hlist; ++ }; ++ }; ++}; ++ ++struct ksm_scan { ++ struct mm_slot *mm_slot; ++ long unsigned int address; ++ struct rmap_item **rmap_list; ++ long unsigned int seqnr; ++}; ++ ++struct stable_node { ++ union { ++ struct rb_node node; ++ struct { ++ struct list_head *head; ++ struct { ++ struct hlist_node hlist_dup; ++ struct list_head list; ++ }; ++ }; ++ }; ++ struct hlist_head hlist; ++ union { ++ long unsigned int kpfn; ++ long unsigned int chain_prune_time; ++ }; ++ int rmap_hlist_len; ++ int nid; ++}; ++ ++enum get_ksm_page_flags { ++ GET_KSM_PAGE_NOLOCK = 0, ++ GET_KSM_PAGE_LOCK = 1, ++ GET_KSM_PAGE_TRYLOCK = 2, ++}; ++ ++enum stat_item { ++ ALLOC_FASTPATH = 0, ++ ALLOC_SLOWPATH = 1, ++ FREE_FASTPATH = 2, ++ FREE_SLOWPATH = 3, ++ FREE_FROZEN = 4, ++ FREE_ADD_PARTIAL = 5, ++ FREE_REMOVE_PARTIAL = 6, ++ ALLOC_FROM_PARTIAL = 7, ++ ALLOC_SLAB = 8, ++ ALLOC_REFILL = 9, ++ ALLOC_NODE_MISMATCH = 10, ++ FREE_SLAB = 11, ++ CPUSLAB_FLUSH = 12, ++ DEACTIVATE_FULL = 13, ++ DEACTIVATE_EMPTY = 14, ++ DEACTIVATE_TO_HEAD = 15, ++ DEACTIVATE_TO_TAIL = 16, ++ DEACTIVATE_REMOTE_FREES = 17, ++ DEACTIVATE_BYPASS = 18, ++ ORDER_FALLBACK = 19, ++ CMPXCHG_DOUBLE_CPU_FAIL = 20, ++ CMPXCHG_DOUBLE_FAIL = 21, ++ 
CPU_PARTIAL_ALLOC = 22, ++ CPU_PARTIAL_FREE = 23, ++ CPU_PARTIAL_NODE = 24, ++ CPU_PARTIAL_DRAIN = 25, ++ NR_SLUB_STAT_ITEMS = 26, ++}; ++ ++struct memory_notify { ++ long unsigned int start_pfn; ++ long unsigned int nr_pages; ++ int status_change_nid_normal; ++ int status_change_nid_high; ++ int status_change_nid; ++}; ++ ++struct track { ++ long unsigned int addr; ++ long unsigned int addrs[16]; ++ int cpu; ++ int pid; ++ long unsigned int when; ++}; ++ ++enum track_item { ++ TRACK_ALLOC = 0, ++ TRACK_FREE = 1, ++}; ++ ++struct detached_freelist { ++ struct page *page; ++ void *tail; ++ void *freelist; ++ int cnt; ++ struct kmem_cache *s; ++}; ++ ++struct location { ++ long unsigned int count; ++ long unsigned int addr; ++ long long int sum_time; ++ long int min_time; ++ long int max_time; ++ long int min_pid; ++ long int max_pid; ++ long unsigned int cpus[16]; ++ nodemask_t nodes; ++}; ++ ++struct loc_track { ++ long unsigned int max; ++ long unsigned int count; ++ struct location *loc; ++}; ++ ++enum slab_stat_type { ++ SL_ALL = 0, ++ SL_PARTIAL = 1, ++ SL_CPU = 2, ++ SL_OBJECTS = 3, ++ SL_TOTAL = 4, ++}; ++ ++struct slab_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct kmem_cache *, char *); ++ ssize_t (*store)(struct kmem_cache *, const char *, size_t); ++}; ++ ++struct saved_alias { ++ struct kmem_cache *s; ++ const char *name; ++ struct saved_alias *next; ++}; ++ ++enum slab_modes { ++ M_NONE = 0, ++ M_PARTIAL = 1, ++ M_FULL = 2, ++ M_FREE = 3, ++}; ++ ++enum { ++ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, ++ SECTION_INFO = 12, ++ MIX_SECTION_INFO = 13, ++ NODE_INFO = 14, ++ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14, ++}; ++ ++enum { ++ MMOP_OFFLINE = 4294967295, ++ MMOP_ONLINE_KEEP = 0, ++ MMOP_ONLINE_KERNEL = 1, ++ MMOP_ONLINE_MOVABLE = 2, ++}; ++ ++typedef void (*online_page_callback_t)(struct page *); ++ ++struct memory_block { ++ long unsigned int start_section_nr; ++ long unsigned int end_section_nr; ++ long unsigned int state; ++ int section_count; ++ int online_type; ++ int phys_device; ++ void *hw; ++ int (*phys_callback)(struct memory_block *); ++ struct device dev; ++ int nid; ++}; ++ ++struct buffer_head; ++ ++typedef void bh_end_io_t(struct buffer_head *, int); ++ ++struct buffer_head { ++ long unsigned int b_state; ++ struct buffer_head *b_this_page; ++ struct page *b_page; ++ sector_t b_blocknr; ++ size_t b_size; ++ char *b_data; ++ struct block_device *b_bdev; ++ bh_end_io_t *b_end_io; ++ void *b_private; ++ struct list_head b_assoc_buffers; ++ struct address_space *b_assoc_map; ++ atomic_t b_count; ++}; ++ ++typedef struct page *new_page_t(struct page *, long unsigned int); ++ ++typedef void free_page_t(struct page *, long unsigned int); ++ ++enum bh_state_bits { ++ BH_Uptodate = 0, ++ BH_Dirty = 1, ++ BH_Lock = 2, ++ BH_Req = 3, ++ BH_Uptodate_Lock = 4, ++ BH_Mapped = 5, ++ BH_New = 6, ++ BH_Async_Read = 7, ++ BH_Async_Write = 8, ++ BH_Delay = 9, ++ BH_Boundary = 10, ++ BH_Write_EIO = 11, ++ BH_Unwritten = 12, ++ BH_Quiet = 13, ++ BH_Meta = 14, ++ BH_Prio = 15, ++ BH_Defer_Completion = 16, ++ BH_PrivateStart = 17, ++}; ++ ++struct trace_event_raw_mm_migrate_pages { ++ struct trace_entry ent; ++ long unsigned int succeeded; ++ long unsigned int failed; ++ enum migrate_mode mode; ++ int reason; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_migrate_pages {}; ++ ++enum scan_result { ++ SCAN_FAIL = 0, ++ SCAN_SUCCEED = 1, ++ SCAN_PMD_NULL = 2, ++ SCAN_EXCEED_NONE_PTE = 3, ++ SCAN_PTE_NON_PRESENT = 4, ++ SCAN_PAGE_RO = 5, ++ 
SCAN_LACK_REFERENCED_PAGE = 6, ++ SCAN_PAGE_NULL = 7, ++ SCAN_SCAN_ABORT = 8, ++ SCAN_PAGE_COUNT = 9, ++ SCAN_PAGE_LRU = 10, ++ SCAN_PAGE_LOCK = 11, ++ SCAN_PAGE_ANON = 12, ++ SCAN_PAGE_COMPOUND = 13, ++ SCAN_ANY_PROCESS = 14, ++ SCAN_VMA_NULL = 15, ++ SCAN_VMA_CHECK = 16, ++ SCAN_ADDRESS_RANGE = 17, ++ SCAN_SWAP_CACHE_PAGE = 18, ++ SCAN_DEL_PAGE_LRU = 19, ++ SCAN_ALLOC_HUGE_PAGE_FAIL = 20, ++ SCAN_CGROUP_CHARGE_FAIL = 21, ++ SCAN_EXCEED_SWAP_PTE = 22, ++ SCAN_TRUNCATED = 23, ++}; ++ ++struct trace_event_raw_mm_khugepaged_scan_pmd { ++ struct trace_entry ent; ++ struct mm_struct *mm; ++ long unsigned int pfn; ++ bool writable; ++ int referenced; ++ int none_or_zero; ++ int status; ++ int unmapped; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_collapse_huge_page { ++ struct trace_entry ent; ++ struct mm_struct *mm; ++ int isolated; ++ int status; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_collapse_huge_page_isolate { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ int none_or_zero; ++ int referenced; ++ bool writable; ++ int status; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_mm_collapse_huge_page_swapin { ++ struct trace_entry ent; ++ struct mm_struct *mm; ++ int swapped_in; ++ int referenced; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mm_khugepaged_scan_pmd {}; ++ ++struct trace_event_data_offsets_mm_collapse_huge_page {}; ++ ++struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; ++ ++struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; ++ ++struct mm_slot___2 { ++ struct hlist_node hash; ++ struct list_head mm_node; ++ struct mm_struct *mm; ++}; ++ ++struct khugepaged_scan { ++ struct list_head mm_head; ++ struct mm_slot___2 *mm_slot; ++ long unsigned int address; ++}; ++ ++typedef __kernel_ulong_t __kernel_ino_t; ++ ++typedef __kernel_ino_t ino_t; ++ ++struct mem_cgroup_tree_per_node { ++ struct rb_root rb_root; ++ struct rb_node *rb_rightmost; ++ spinlock_t lock; ++}; ++ ++struct mem_cgroup_tree { ++ struct mem_cgroup_tree_per_node *rb_tree_per_node[16]; ++}; ++ ++struct mem_cgroup_eventfd_list { ++ struct list_head list; ++ struct eventfd_ctx *eventfd; ++}; ++ ++struct mem_cgroup_event { ++ struct mem_cgroup *memcg; ++ struct eventfd_ctx *eventfd; ++ struct list_head list; ++ int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *); ++ void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *); ++ poll_table pt; ++ wait_queue_head_t *wqh; ++ wait_queue_entry_t wait; ++ struct work_struct remove; ++}; ++ ++struct move_charge_struct { ++ spinlock_t lock; ++ struct mm_struct *mm; ++ struct mem_cgroup *from; ++ struct mem_cgroup *to; ++ long unsigned int flags; ++ long unsigned int precharge; ++ long unsigned int moved_charge; ++ long unsigned int moved_swap; ++ struct task_struct *moving_task; ++ wait_queue_head_t waitq; ++}; ++ ++enum res_type { ++ _MEM = 0, ++ _MEMSWAP = 1, ++ _OOM_TYPE = 2, ++ _KMEM = 3, ++ _TCP = 4, ++}; ++ ++struct oom_wait_info { ++ struct mem_cgroup *memcg; ++ wait_queue_entry_t wait; ++}; ++ ++enum oom_status { ++ OOM_SUCCESS = 0, ++ OOM_FAILED = 1, ++ OOM_ASYNC = 2, ++ OOM_SKIPPED = 3, ++}; ++ ++struct memcg_stock_pcp { ++ struct mem_cgroup *cached; ++ unsigned int nr_pages; ++ struct work_struct work; ++ long unsigned int flags; ++}; ++ ++struct memcg_kmem_cache_create_work { ++ struct mem_cgroup *memcg; ++ struct kmem_cache *cachep; ++ struct work_struct work; ++}; ++ ++struct accumulated_stats { ++ long unsigned int stat[34]; ++ long 
unsigned int events[81]; ++ long unsigned int lru_pages[5]; ++ const unsigned int *stats_array; ++ const unsigned int *events_array; ++ int stats_size; ++ int events_size; ++}; ++ ++enum { ++ RES_USAGE = 0, ++ RES_LIMIT = 1, ++ RES_MAX_USAGE = 2, ++ RES_FAILCNT = 3, ++ RES_SOFT_LIMIT = 4, ++}; ++ ++union mc_target { ++ struct page *page; ++ swp_entry_t ent; ++}; ++ ++enum mc_target_type { ++ MC_TARGET_NONE = 0, ++ MC_TARGET_PAGE = 1, ++ MC_TARGET_SWAP = 2, ++ MC_TARGET_DEVICE = 3, ++}; ++ ++struct uncharge_gather { ++ struct mem_cgroup *memcg; ++ long unsigned int pgpgout; ++ long unsigned int nr_anon; ++ long unsigned int nr_file; ++ long unsigned int nr_kmem; ++ long unsigned int nr_huge; ++ long unsigned int nr_shmem; ++ struct page *dummy_page; ++}; ++ ++struct numa_stat { ++ const char *name; ++ unsigned int lru_mask; ++}; ++ ++enum vmpressure_levels { ++ VMPRESSURE_LOW = 0, ++ VMPRESSURE_MEDIUM = 1, ++ VMPRESSURE_CRITICAL = 2, ++ VMPRESSURE_NUM_LEVELS = 3, ++}; ++ ++enum vmpressure_modes { ++ VMPRESSURE_NO_PASSTHROUGH = 0, ++ VMPRESSURE_HIERARCHY = 1, ++ VMPRESSURE_LOCAL = 2, ++ VMPRESSURE_NUM_MODES = 3, ++}; ++ ++struct vmpressure_event { ++ struct eventfd_ctx *efd; ++ enum vmpressure_levels level; ++ enum vmpressure_modes mode; ++ struct list_head node; ++}; ++ ++struct swap_cgroup_ctrl { ++ struct page **map; ++ long unsigned int length; ++ spinlock_t lock; ++}; ++ ++struct swap_cgroup { ++ short unsigned int id; ++}; ++ ++struct hugetlb_cgroup___2 { ++ struct cgroup_subsys_state___2 css; ++ struct page_counter hugepage[4]; ++}; ++ ++enum { ++ RES_USAGE___2 = 0, ++ RES_LIMIT___2 = 1, ++ RES_MAX_USAGE___2 = 2, ++ RES_FAILCNT___2 = 3, ++}; ++ ++enum mf_result { ++ MF_IGNORED = 0, ++ MF_FAILED = 1, ++ MF_DELAYED = 2, ++ MF_RECOVERED = 3, ++}; ++ ++enum mf_action_page_type { ++ MF_MSG_KERNEL = 0, ++ MF_MSG_KERNEL_HIGH_ORDER = 1, ++ MF_MSG_SLAB = 2, ++ MF_MSG_DIFFERENT_COMPOUND = 3, ++ MF_MSG_POISONED_HUGE = 4, ++ MF_MSG_HUGE = 5, ++ MF_MSG_FREE_HUGE = 6, ++ MF_MSG_NON_PMD_HUGE = 7, ++ MF_MSG_UNMAP_FAILED = 8, ++ MF_MSG_DIRTY_SWAPCACHE = 9, ++ MF_MSG_CLEAN_SWAPCACHE = 10, ++ MF_MSG_DIRTY_MLOCKED_LRU = 11, ++ MF_MSG_CLEAN_MLOCKED_LRU = 12, ++ MF_MSG_DIRTY_UNEVICTABLE_LRU = 13, ++ MF_MSG_CLEAN_UNEVICTABLE_LRU = 14, ++ MF_MSG_DIRTY_LRU = 15, ++ MF_MSG_CLEAN_LRU = 16, ++ MF_MSG_TRUNCATED_LRU = 17, ++ MF_MSG_BUDDY = 18, ++ MF_MSG_BUDDY_2ND = 19, ++ MF_MSG_DAX = 20, ++ MF_MSG_UNKNOWN = 21, ++}; ++ ++struct __kfifo { ++ unsigned int in; ++ unsigned int out; ++ unsigned int mask; ++ unsigned int esize; ++ void *data; ++}; ++ ++struct to_kill { ++ struct list_head nd; ++ struct task_struct *tsk; ++ long unsigned int addr; ++ short int size_shift; ++}; ++ ++struct page_state { ++ long unsigned int mask; ++ long unsigned int res; ++ enum mf_action_page_type type; ++ int (*action)(struct page *, long unsigned int); ++}; ++ ++struct memory_failure_entry { ++ long unsigned int pfn; ++ int flags; ++}; ++ ++struct memory_failure_cpu { ++ struct { ++ union { ++ struct __kfifo kfifo; ++ struct memory_failure_entry *type; ++ const struct memory_failure_entry *const_type; ++ char (*rectype)[0]; ++ struct memory_failure_entry *ptr; ++ const struct memory_failure_entry *ptr_const; ++ }; ++ struct memory_failure_entry buf[16]; ++ } fifo; ++ spinlock_t lock; ++ struct work_struct work; ++}; ++ ++struct cleancache_filekey { ++ union { ++ ino_t ino; ++ __u32 fh[6]; ++ u32 key[6]; ++ } u; ++}; ++ ++struct cleancache_ops { ++ int (*init_fs)(size_t); ++ int (*init_shared_fs)(uuid_t *, size_t); ++ int 
(*get_page)(int, struct cleancache_filekey, long unsigned int, struct page___2 *); ++ void (*put_page)(int, struct cleancache_filekey, long unsigned int, struct page___2 *); ++ void (*invalidate_page)(int, struct cleancache_filekey, long unsigned int); ++ void (*invalidate_inode)(int, struct cleancache_filekey); ++ void (*invalidate_fs)(int); ++}; ++ ++struct memory_isolate_notify { ++ long unsigned int start_pfn; ++ unsigned int nr_pages; ++ unsigned int pages_found; ++}; ++ ++struct trace_event_raw_test_pages_isolated { ++ struct trace_entry ent; ++ long unsigned int start_pfn; ++ long unsigned int end_pfn; ++ long unsigned int fin_pfn; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_test_pages_isolated {}; ++ ++struct zpool_driver; ++ ++struct zpool { ++ struct zpool_driver *driver; ++ void *pool; ++ const struct zpool_ops *ops; ++ bool evictable; ++ struct list_head list; ++}; ++ ++struct zpool_driver { ++ char *type; ++ struct module___2 *owner; ++ atomic_t refcount; ++ struct list_head list; ++ void * (*create)(const char *, gfp_t, const struct zpool_ops *, struct zpool *); ++ void (*destroy)(void *); ++ int (*malloc)(void *, size_t, gfp_t, long unsigned int *); ++ void (*free)(void *, long unsigned int); ++ int (*shrink)(void *, unsigned int, unsigned int *); ++ void * (*map)(void *, long unsigned int, enum zpool_mapmode); ++ void (*unmap)(void *, long unsigned int); ++ u64 (*total_size)(void *); ++}; ++ ++struct zbud_pool; ++ ++struct zbud_ops { ++ int (*evict)(struct zbud_pool *, long unsigned int); ++}; ++ ++struct zbud_pool { ++ spinlock_t lock; ++ struct list_head unbuddied[63]; ++ struct list_head buddied; ++ struct list_head lru; ++ u64 pages_nr; ++ const struct zbud_ops *ops; ++ struct zpool *zpool; ++ const struct zpool_ops *zpool_ops; ++}; ++ ++struct zbud_header { ++ struct list_head buddy; ++ struct list_head lru; ++ unsigned int first_chunks; ++ unsigned int last_chunks; ++ bool under_reclaim; ++}; ++ ++enum buddy { ++ FIRST = 0, ++ LAST = 1, ++}; ++ ++enum zs_mapmode { ++ ZS_MM_RW = 0, ++ ZS_MM_RO = 1, ++ ZS_MM_WO = 2, ++}; ++ ++struct zs_pool_stats { ++ long unsigned int pages_compacted; ++}; ++ ++enum fullness_group { ++ ZS_EMPTY = 0, ++ ZS_ALMOST_EMPTY = 1, ++ ZS_ALMOST_FULL = 2, ++ ZS_FULL = 3, ++ NR_ZS_FULLNESS = 4, ++}; ++ ++enum zs_stat_type { ++ CLASS_EMPTY = 0, ++ CLASS_ALMOST_EMPTY = 1, ++ CLASS_ALMOST_FULL = 2, ++ CLASS_FULL = 3, ++ OBJ_ALLOCATED = 4, ++ OBJ_USED = 5, ++ NR_ZS_STAT_TYPE = 6, ++}; ++ ++struct zs_size_stat { ++ long unsigned int objs[6]; ++}; ++ ++struct size_class { ++ spinlock_t lock; ++ struct list_head fullness_list[4]; ++ int size; ++ int objs_per_zspage; ++ int pages_per_zspage; ++ unsigned int index; ++ struct zs_size_stat stats; ++}; ++ ++struct link_free { ++ union { ++ long unsigned int next; ++ long unsigned int handle; ++ }; ++}; ++ ++struct zs_pool { ++ const char *name; ++ struct size_class *size_class[257]; ++ struct kmem_cache *handle_cachep; ++ struct kmem_cache *zspage_cachep; ++ atomic_long_t pages_allocated; ++ struct zs_pool_stats stats; ++ struct shrinker shrinker; ++ struct dentry___2 *stat_dentry; ++ struct inode___2 *inode; ++ struct work_struct free_work; ++ struct wait_queue_head migration_wait; ++ atomic_long_t isolated_pages; ++ bool destroying; ++}; ++ ++struct zspage { ++ struct { ++ unsigned int fullness: 2; ++ unsigned int class: 9; ++ unsigned int isolated: 3; ++ unsigned int magic: 8; ++ }; ++ unsigned int inuse; ++ unsigned int freeobj; ++ struct page___2 *first_page; ++ struct list_head list; 
++ rwlock_t lock; ++}; ++ ++struct mapping_area { ++ char *vm_buf; ++ char *vm_addr; ++ enum zs_mapmode vm_mm; ++}; ++ ++struct zs_compact_control { ++ struct page___2 *s_page; ++ struct page___2 *d_page; ++ int obj_idx; ++}; ++ ++struct cma { ++ long unsigned int base_pfn; ++ long unsigned int count; ++ long unsigned int *bitmap; ++ unsigned int order_per_bit; ++ struct mutex lock; ++ const char *name; ++}; ++ ++struct trace_event_raw_cma_alloc { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ const struct page___2 *page; ++ unsigned int count; ++ unsigned int align; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cma_release { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ const struct page___2 *page; ++ unsigned int count; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_cma_alloc {}; ++ ++struct trace_event_data_offsets_cma_release {}; ++ ++struct balloon_dev_info { ++ long unsigned int isolated_pages; ++ spinlock_t pages_lock; ++ struct list_head pages; ++ int (*migratepage)(struct balloon_dev_info *, struct page___2 *, struct page___2 *, enum migrate_mode); ++ struct inode___2 *inode; ++}; ++ ++struct frame_vector { ++ unsigned int nr_allocated; ++ unsigned int nr_frames; ++ bool got_ref; ++ bool is_pfns; ++ void *ptrs[0]; ++}; ++ ++enum { ++ BAD_STACK = 4294967295, ++ NOT_STACK = 0, ++ GOOD_FRAME = 1, ++ GOOD_STACK = 2, ++}; ++ ++struct hugetlbfs_inode_info { ++ struct shared_policy policy; ++ struct inode___2 vfs_inode; ++ unsigned int seals; ++}; ++ ++typedef s32 compat_off_t; ++ ++struct open_flags { ++ int open_flag; ++ umode_t mode; ++ int acc_mode; ++ int intent; ++ int lookup_flags; ++}; ++ ++typedef __kernel_long_t __kernel_off_t; ++ ++typedef __kernel_off_t off_t; ++ ++typedef s32 compat_ssize_t; ++ ++struct file_dedupe_range_info { ++ __s64 dest_fd; ++ __u64 dest_offset; ++ __u64 bytes_deduped; ++ __s32 status; ++ __u32 reserved; ++}; ++ ++struct file_dedupe_range { ++ __u64 src_offset; ++ __u64 src_length; ++ __u16 dest_count; ++ __u16 reserved1; ++ __u32 reserved2; ++ struct file_dedupe_range_info info[0]; ++}; ++ ++typedef int __kernel_rwf_t; ++ ++typedef __kernel_rwf_t rwf_t; ++ ++typedef int filler_t___2(void *, struct page___2 *); ++ ++struct kobj_map; ++ ++struct char_device_struct { ++ struct char_device_struct *next; ++ unsigned int major; ++ unsigned int baseminor; ++ int minorct; ++ char name[64]; ++ struct cdev *cdev; ++}; ++ ++struct stat { ++ long unsigned int st_dev; ++ long unsigned int st_ino; ++ unsigned int st_mode; ++ unsigned int st_nlink; ++ unsigned int st_uid; ++ unsigned int st_gid; ++ long unsigned int st_rdev; ++ long unsigned int __pad1; ++ long int st_size; ++ int st_blksize; ++ int __pad2; ++ long int st_blocks; ++ long int st_atime; ++ long unsigned int st_atime_nsec; ++ long int st_mtime; ++ long unsigned int st_mtime_nsec; ++ long int st_ctime; ++ long unsigned int st_ctime_nsec; ++ unsigned int __unused4; ++ unsigned int __unused5; ++}; ++ ++typedef u16 __compat_uid16_t; ++ ++typedef u16 __compat_gid16_t; ++ ++typedef u16 compat_mode_t; ++ ++typedef u32 compat_ino_t; ++ ++typedef u32 compat_dev_t; ++ ++typedef s64 compat_s64; ++ ++typedef u16 compat_ushort_t; ++ ++struct compat_stat { ++ compat_dev_t st_dev; ++ compat_ino_t st_ino; ++ compat_mode_t st_mode; ++ compat_ushort_t st_nlink; ++ __compat_uid16_t st_uid; ++ __compat_gid16_t st_gid; ++ compat_dev_t st_rdev; ++ compat_off_t st_size; ++ compat_off_t st_blksize; ++ compat_off_t st_blocks; ++ compat_time_t st_atime; ++ compat_ulong_t st_atime_nsec; ++ 
compat_time_t st_mtime; ++ compat_ulong_t st_mtime_nsec; ++ compat_time_t st_ctime; ++ compat_ulong_t st_ctime_nsec; ++ compat_ulong_t __unused4[2]; ++}; ++ ++struct stat64 { ++ compat_u64 st_dev; ++ unsigned char __pad0[4]; ++ compat_ulong_t __st_ino; ++ compat_uint_t st_mode; ++ compat_uint_t st_nlink; ++ compat_ulong_t st_uid; ++ compat_ulong_t st_gid; ++ compat_u64 st_rdev; ++ unsigned char __pad3[4]; ++ compat_s64 st_size; ++ compat_ulong_t st_blksize; ++ compat_u64 st_blocks; ++ compat_ulong_t st_atime; ++ compat_ulong_t st_atime_nsec; ++ compat_ulong_t st_mtime; ++ compat_ulong_t st_mtime_nsec; ++ compat_ulong_t st_ctime; ++ compat_ulong_t st_ctime_nsec; ++ compat_u64 st_ino; ++}; ++ ++struct statx_timestamp { ++ __s64 tv_sec; ++ __u32 tv_nsec; ++ __s32 __reserved; ++}; ++ ++struct statx { ++ __u32 stx_mask; ++ __u32 stx_blksize; ++ __u64 stx_attributes; ++ __u32 stx_nlink; ++ __u32 stx_uid; ++ __u32 stx_gid; ++ __u16 stx_mode; ++ __u16 __spare0[1]; ++ __u64 stx_ino; ++ __u64 stx_size; ++ __u64 stx_blocks; ++ __u64 stx_attributes_mask; ++ struct statx_timestamp stx_atime; ++ struct statx_timestamp stx_btime; ++ struct statx_timestamp stx_ctime; ++ struct statx_timestamp stx_mtime; ++ __u32 stx_rdev_major; ++ __u32 stx_rdev_minor; ++ __u32 stx_dev_major; ++ __u32 stx_dev_minor; ++ __u64 __spare2[14]; ++}; ++ ++typedef short unsigned int ushort; ++ ++struct user_arg_ptr { ++ bool is_compat; ++ union { ++ const char * const *native; ++ const compat_uptr_t *compat; ++ } ptr; ++}; ++ ++enum inode_i_mutex_lock_class { ++ I_MUTEX_NORMAL = 0, ++ I_MUTEX_PARENT = 1, ++ I_MUTEX_CHILD = 2, ++ I_MUTEX_XATTR = 3, ++ I_MUTEX_NONDIR2 = 4, ++ I_MUTEX_PARENT2 = 5, ++}; ++ ++struct name_snapshot { ++ const unsigned char *name; ++ unsigned char inline_name[32]; ++}; ++ ++struct saved { ++ struct path___2 link; ++ struct delayed_call done; ++ const char *name; ++ unsigned int seq; ++}; ++ ++struct nameidata { ++ struct path___2 path; ++ struct qstr last; ++ struct path___2 root; ++ struct inode___2 *inode; ++ unsigned int flags; ++ unsigned int seq; ++ unsigned int m_seq; ++ int last_type; ++ unsigned int depth; ++ int total_link_count; ++ struct saved *stack; ++ struct saved internal[2]; ++ struct filename *name; ++ struct nameidata *saved; ++ struct inode___2 *link_inode; ++ unsigned int root_seq; ++ int dfd; ++}; ++ ++enum { ++ LAST_NORM = 0, ++ LAST_ROOT = 1, ++ LAST_DOT = 2, ++ LAST_DOTDOT = 3, ++ LAST_BIND = 4, ++}; ++ ++struct mount; ++ ++struct mnt_namespace { ++ atomic_t count; ++ struct ns_common___2 ns; ++ struct mount *root; ++ struct list_head list; ++ struct user_namespace___2 *user_ns; ++ struct ucounts___2 *ucounts; ++ u64 seq; ++ wait_queue_head_t poll; ++ u64 event; ++ unsigned int mounts; ++ unsigned int pending_mounts; ++}; ++ ++struct mnt_pcp; ++ ++struct mountpoint; ++ ++struct mount { ++ struct hlist_node mnt_hash; ++ struct mount *mnt_parent; ++ struct dentry___2 *mnt_mountpoint; ++ struct vfsmount___2 mnt; ++ union { ++ struct callback_head mnt_rcu; ++ struct llist_node mnt_llist; ++ }; ++ struct mnt_pcp *mnt_pcp; ++ struct list_head mnt_mounts; ++ struct list_head mnt_child; ++ struct list_head mnt_instance; ++ const char *mnt_devname; ++ struct list_head mnt_list; ++ struct list_head mnt_expire; ++ struct list_head mnt_share; ++ struct list_head mnt_slave_list; ++ struct list_head mnt_slave; ++ struct mount *mnt_master; ++ struct mnt_namespace *mnt_ns; ++ struct mountpoint *mnt_mp; ++ struct hlist_node mnt_mp_list; ++ struct list_head mnt_umounting; ++ struct 
fsnotify_mark_connector *mnt_fsnotify_marks; ++ __u32 mnt_fsnotify_mask; ++ int mnt_id; ++ int mnt_group_id; ++ int mnt_expiry_mark; ++ struct hlist_head mnt_pins; ++ struct fs_pin mnt_umount; ++ struct dentry___2 *mnt_ex_mountpoint; ++}; ++ ++struct mnt_pcp { ++ int mnt_count; ++ int mnt_writers; ++}; ++ ++struct mountpoint { ++ struct hlist_node m_hash; ++ struct dentry___2 *m_dentry; ++ struct hlist_head m_list; ++ int m_count; ++}; ++ ++enum { ++ WALK_FOLLOW = 1, ++ WALK_MORE = 2, ++}; ++ ++struct word_at_a_time { ++ const long unsigned int one_bits; ++ const long unsigned int high_bits; ++}; ++ ++struct compat_flock { ++ short int l_type; ++ short int l_whence; ++ compat_off_t l_start; ++ compat_off_t l_len; ++ compat_pid_t l_pid; ++}; ++ ++struct compat_flock64 { ++ short int l_type; ++ short int l_whence; ++ compat_loff_t l_start; ++ compat_loff_t l_len; ++ compat_pid_t l_pid; ++}; ++ ++struct f_owner_ex { ++ int type; ++ __kernel_pid_t pid; ++}; ++ ++struct flock { ++ short int l_type; ++ short int l_whence; ++ __kernel_off_t l_start; ++ __kernel_off_t l_len; ++ __kernel_pid_t l_pid; ++}; ++ ++struct fiemap { ++ __u64 fm_start; ++ __u64 fm_length; ++ __u32 fm_flags; ++ __u32 fm_mapped_extents; ++ __u32 fm_extent_count; ++ __u32 fm_reserved; ++ struct fiemap_extent fm_extents[0]; ++}; ++ ++struct file_clone_range { ++ __s64 src_fd; ++ __u64 src_offset; ++ __u64 src_length; ++ __u64 dest_offset; ++}; ++ ++typedef int get_block_t(struct inode___2 *, sector_t, struct buffer_head *, int); ++ ++struct space_resv { ++ __s16 l_type; ++ __s16 l_whence; ++ __s64 l_start; ++ __s64 l_len; ++ __s32 l_sysid; ++ __u32 l_pid; ++ __s32 l_pad[4]; ++}; ++ ++struct linux_dirent64 { ++ u64 d_ino; ++ s64 d_off; ++ short unsigned int d_reclen; ++ unsigned char d_type; ++ char d_name[0]; ++}; ++ ++struct linux_dirent { ++ long unsigned int d_ino; ++ long unsigned int d_off; ++ short unsigned int d_reclen; ++ char d_name[1]; ++}; ++ ++struct getdents_callback { ++ struct dir_context ctx; ++ struct linux_dirent *current_dir; ++ struct linux_dirent *previous; ++ int count; ++ int error; ++}; ++ ++struct getdents_callback64 { ++ struct dir_context ctx; ++ struct linux_dirent64 *current_dir; ++ struct linux_dirent64 *previous; ++ int count; ++ int error; ++}; ++ ++struct compat_old_linux_dirent { ++ compat_ulong_t d_ino; ++ compat_ulong_t d_offset; ++ short unsigned int d_namlen; ++ char d_name[1]; ++}; ++ ++struct compat_readdir_callback { ++ struct dir_context ctx; ++ struct compat_old_linux_dirent *dirent; ++ int result; ++}; ++ ++struct compat_linux_dirent { ++ compat_ulong_t d_ino; ++ compat_ulong_t d_off; ++ short unsigned int d_reclen; ++ char d_name[1]; ++}; ++ ++struct compat_getdents_callback { ++ struct dir_context ctx; ++ struct compat_linux_dirent *current_dir; ++ struct compat_linux_dirent *previous; ++ int count; ++ int error; ++}; ++ ++typedef struct { ++ long unsigned int fds_bits[16]; ++} __kernel_fd_set; ++ ++typedef __kernel_fd_set fd_set; ++ ++struct poll_table_entry { ++ struct file *filp; ++ __poll_t key; ++ wait_queue_entry_t wait; ++ wait_queue_head_t *wait_address; ++}; ++ ++struct poll_table_page; ++ ++struct poll_wqueues { ++ poll_table pt; ++ struct poll_table_page *table; ++ struct task_struct *polling_task; ++ int triggered; ++ int error; ++ int inline_index; ++ struct poll_table_entry inline_entries[9]; ++}; ++ ++struct poll_table_page { ++ struct poll_table_page *next; ++ struct poll_table_entry *entry; ++ struct poll_table_entry entries[0]; ++}; ++ ++typedef struct { ++ long 
unsigned int *in; ++ long unsigned int *out; ++ long unsigned int *ex; ++ long unsigned int *res_in; ++ long unsigned int *res_out; ++ long unsigned int *res_ex; ++} fd_set_bits; ++ ++struct poll_list { ++ struct poll_list *next; ++ int len; ++ struct pollfd entries[0]; ++}; ++ ++struct compat_sel_arg_struct { ++ compat_ulong_t n; ++ compat_uptr_t inp; ++ compat_uptr_t outp; ++ compat_uptr_t exp; ++ compat_uptr_t tvp; ++}; ++ ++enum dentry_d_lock_class { ++ DENTRY_D_LOCK_NORMAL = 0, ++ DENTRY_D_LOCK_NESTED = 1, ++}; ++ ++struct external_name { ++ union { ++ atomic_t count; ++ struct callback_head head; ++ } u; ++ unsigned char name[0]; ++}; ++ ++enum d_walk_ret { ++ D_WALK_CONTINUE = 0, ++ D_WALK_QUIT = 1, ++ D_WALK_NORETRY = 2, ++ D_WALK_SKIP = 3, ++}; ++ ++struct check_mount { ++ struct vfsmount___2 *mnt; ++ unsigned int mounted; ++}; ++ ++struct select_data { ++ struct dentry___2 *start; ++ struct list_head dispose; ++ int found; ++}; ++ ++enum file_time_flags { ++ S_ATIME = 1, ++ S_MTIME = 2, ++ S_CTIME = 4, ++ S_VERSION = 8, ++}; ++ ++struct proc_mounts { ++ struct mnt_namespace *ns; ++ struct path___2 root; ++ int (*show)(struct seq_file___2 *, struct vfsmount___2 *); ++ void *cached_mount; ++ u64 cached_event; ++ loff_t cached_index; ++}; ++ ++enum umount_tree_flags { ++ UMOUNT_SYNC = 1, ++ UMOUNT_PROPAGATE = 2, ++ UMOUNT_CONNECTED = 4, ++}; ++ ++struct simple_transaction_argresp { ++ ssize_t size; ++ char data[0]; ++}; ++ ++struct simple_attr { ++ int (*get)(void *, u64 *); ++ int (*set)(void *, u64); ++ char get_buf[24]; ++ char set_buf[24]; ++ void *data; ++ const char *fmt; ++ struct mutex mutex; ++}; ++ ++struct wb_completion { ++ atomic_t cnt; ++}; ++ ++struct wb_writeback_work { ++ long int nr_pages; ++ struct super_block___2 *sb; ++ enum writeback_sync_modes sync_mode; ++ unsigned int tagged_writepages: 1; ++ unsigned int for_kupdate: 1; ++ unsigned int range_cyclic: 1; ++ unsigned int for_background: 1; ++ unsigned int for_sync: 1; ++ unsigned int auto_free: 1; ++ enum wb_reason reason; ++ struct list_head list; ++ struct wb_completion *done; ++}; ++ ++struct trace_event_raw_writeback_dirty_page { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int ino; ++ long unsigned int index; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_dirty_inode_template { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int ino; ++ long unsigned int state; ++ long unsigned int flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_write_inode_template { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int ino; ++ int sync_mode; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_work_class { ++ struct trace_entry ent; ++ char name[32]; ++ long int nr_pages; ++ dev_t sb_dev; ++ int sync_mode; ++ int for_kupdate; ++ int range_cyclic; ++ int for_background; ++ int reason; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_pages_written { ++ struct trace_entry ent; ++ long int pages; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_class { ++ struct trace_entry ent; ++ char name[32]; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_bdi_register { ++ struct trace_entry ent; ++ char name[32]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wbc_class { ++ struct trace_entry ent; ++ char name[32]; ++ long int nr_to_write; ++ long int pages_skipped; ++ int sync_mode; ++ int for_kupdate; ++ int 
for_background; ++ int for_reclaim; ++ int range_cyclic; ++ long int range_start; ++ long int range_end; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_queue_io { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int older; ++ long int age; ++ int moved; ++ int reason; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_global_dirty_state { ++ struct trace_entry ent; ++ long unsigned int nr_dirty; ++ long unsigned int nr_writeback; ++ long unsigned int nr_unstable; ++ long unsigned int background_thresh; ++ long unsigned int dirty_thresh; ++ long unsigned int dirty_limit; ++ long unsigned int nr_dirtied; ++ long unsigned int nr_written; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_bdi_dirty_ratelimit { ++ struct trace_entry ent; ++ char bdi[32]; ++ long unsigned int write_bw; ++ long unsigned int avg_write_bw; ++ long unsigned int dirty_rate; ++ long unsigned int dirty_ratelimit; ++ long unsigned int task_ratelimit; ++ long unsigned int balanced_dirty_ratelimit; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_balance_dirty_pages { ++ struct trace_entry ent; ++ char bdi[32]; ++ long unsigned int limit; ++ long unsigned int setpoint; ++ long unsigned int dirty; ++ long unsigned int bdi_setpoint; ++ long unsigned int bdi_dirty; ++ long unsigned int dirty_ratelimit; ++ long unsigned int task_ratelimit; ++ unsigned int dirtied; ++ unsigned int dirtied_pause; ++ long unsigned int paused; ++ long int pause; ++ long unsigned int period; ++ long int think; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_sb_inodes_requeue { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int ino; ++ long unsigned int state; ++ long unsigned int dirtied_when; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_congest_waited_template { ++ struct trace_entry ent; ++ unsigned int usec_timeout; ++ unsigned int usec_delayed; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_single_inode_template { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int ino; ++ long unsigned int state; ++ long unsigned int dirtied_when; ++ long unsigned int writeback_index; ++ long int nr_to_write; ++ long unsigned int wrote; ++ unsigned int cgroup_ino; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_writeback_inode_template { ++ struct trace_entry ent; ++ dev_t dev; ++ long unsigned int ino; ++ long unsigned int state; ++ __u16 mode; ++ long unsigned int dirtied_when; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_writeback_dirty_page {}; ++ ++struct trace_event_data_offsets_writeback_dirty_inode_template {}; ++ ++struct trace_event_data_offsets_writeback_write_inode_template {}; ++ ++struct trace_event_data_offsets_writeback_work_class {}; ++ ++struct trace_event_data_offsets_writeback_pages_written {}; ++ ++struct trace_event_data_offsets_writeback_class {}; ++ ++struct trace_event_data_offsets_writeback_bdi_register {}; ++ ++struct trace_event_data_offsets_wbc_class {}; ++ ++struct trace_event_data_offsets_writeback_queue_io {}; ++ ++struct trace_event_data_offsets_global_dirty_state {}; ++ ++struct trace_event_data_offsets_bdi_dirty_ratelimit {}; ++ ++struct trace_event_data_offsets_balance_dirty_pages {}; ++ ++struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; ++ ++struct trace_event_data_offsets_writeback_congest_waited_template {}; ++ ++struct 
trace_event_data_offsets_writeback_single_inode_template {}; ++ ++struct trace_event_data_offsets_writeback_inode_template {}; ++ ++struct inode_switch_wbs_context { ++ struct inode___2 *inode; ++ struct bdi_writeback *new_wb; ++ struct callback_head callback_head; ++ struct work_struct work; ++}; ++ ++struct splice_desc { ++ size_t total_len; ++ unsigned int len; ++ unsigned int flags; ++ union { ++ void *userptr; ++ struct file___2 *file; ++ void *data; ++ } u; ++ loff_t pos; ++ loff_t *opos; ++ size_t num_spliced; ++ bool need_wakeup; ++}; ++ ++typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); ++ ++typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); ++ ++struct compat_utimbuf { ++ compat_time_t actime; ++ compat_time_t modtime; ++}; ++ ++typedef int __kernel_daddr_t; ++ ++struct ustat { ++ __kernel_daddr_t f_tfree; ++ __kernel_ino_t f_tinode; ++ char f_fname[6]; ++ char f_fpack[6]; ++}; ++ ++typedef s32 compat_daddr_t; ++ ++typedef __kernel_fsid_t compat_fsid_t; ++ ++struct compat_statfs { ++ int f_type; ++ int f_bsize; ++ int f_blocks; ++ int f_bfree; ++ int f_bavail; ++ int f_files; ++ int f_ffree; ++ compat_fsid_t f_fsid; ++ int f_namelen; ++ int f_frsize; ++ int f_flags; ++ int f_spare[4]; ++}; ++ ++struct compat_ustat { ++ compat_daddr_t f_tfree; ++ compat_ino_t f_tinode; ++ char f_fname[6]; ++ char f_fpack[6]; ++}; ++ ++struct statfs { ++ __kernel_long_t f_type; ++ __kernel_long_t f_bsize; ++ __kernel_long_t f_blocks; ++ __kernel_long_t f_bfree; ++ __kernel_long_t f_bavail; ++ __kernel_long_t f_files; ++ __kernel_long_t f_ffree; ++ __kernel_fsid_t f_fsid; ++ __kernel_long_t f_namelen; ++ __kernel_long_t f_frsize; ++ __kernel_long_t f_flags; ++ __kernel_long_t f_spare[4]; ++}; ++ ++struct statfs64 { ++ __kernel_long_t f_type; ++ __kernel_long_t f_bsize; ++ __u64 f_blocks; ++ __u64 f_bfree; ++ __u64 f_bavail; ++ __u64 f_files; ++ __u64 f_ffree; ++ __kernel_fsid_t f_fsid; ++ __kernel_long_t f_namelen; ++ __kernel_long_t f_frsize; ++ __kernel_long_t f_flags; ++ __kernel_long_t f_spare[4]; ++}; ++ ++struct compat_statfs64___2 { ++ __u32 f_type; ++ __u32 f_bsize; ++ __u64 f_blocks; ++ __u64 f_bfree; ++ __u64 f_bavail; ++ __u64 f_files; ++ __u64 f_ffree; ++ __kernel_fsid_t f_fsid; ++ __u32 f_namelen; ++ __u32 f_frsize; ++ __u32 f_flags; ++ __u32 f_spare[4]; ++} __attribute__((packed)); ++ ++typedef struct ns_common___2 *ns_get_path_helper_t(void *); ++ ++struct ns_get_path_task_args { ++ const struct proc_ns_operations___2 *ns_ops; ++ struct task_struct___2 *task; ++}; ++ ++struct dax_device; ++ ++struct iomap___2 { ++ u64 addr; ++ loff_t offset; ++ u64 length; ++ u16 type; ++ u16 flags; ++ struct block_device *bdev; ++ struct dax_device *dax_dev; ++ void *inline_data; ++ void *private; ++ void (*page_done)(struct inode___2 *, loff_t, unsigned int, struct page *, struct iomap___2 *); ++}; ++ ++struct bh_lru { ++ struct buffer_head *bhs[16]; ++}; ++ ++struct bh_accounting { ++ int nr; ++ int ratelimit; ++}; ++ ++enum { ++ DISK_EVENT_MEDIA_CHANGE = 1, ++ DISK_EVENT_EJECT_REQUEST = 2, ++}; ++ ++struct badblocks { ++ struct device *dev; ++ int count; ++ int unacked_exist; ++ int shift; ++ u64 *page; ++ int changed; ++ seqlock_t lock; ++ sector_t sector; ++ sector_t size; ++}; ++ ++enum { ++ BIOSET_NEED_BVECS = 1, ++ BIOSET_NEED_RESCUER = 2, ++}; ++ ++struct bdev_inode { ++ struct block_device bdev; ++ struct inode___2 vfs_inode; ++}; ++ ++struct blkdev_dio { ++ union { ++ struct kiocb *iocb; ++ struct task_struct 
*waiter; ++ }; ++ size_t size; ++ atomic_t ref; ++ bool multi_bio: 1; ++ bool should_dirty: 1; ++ bool is_sync: 1; ++ struct bio bio; ++}; ++ ++struct bd_holder_disk { ++ struct list_head list; ++ struct gendisk *disk; ++ int refcnt; ++}; ++ ++typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); ++ ++typedef void dio_submit_t(struct bio *, struct inode___2 *, loff_t); ++ ++enum { ++ DIO_LOCKING = 1, ++ DIO_SKIP_HOLES = 2, ++}; ++ ++struct dio_submit { ++ struct bio *bio; ++ unsigned int blkbits; ++ unsigned int blkfactor; ++ unsigned int start_zero_done; ++ int pages_in_io; ++ sector_t block_in_file; ++ unsigned int blocks_available; ++ int reap_counter; ++ sector_t final_block_in_request; ++ int boundary; ++ get_block_t *get_block; ++ dio_submit_t *submit_io; ++ loff_t logical_offset_in_bio; ++ sector_t final_block_in_bio; ++ sector_t next_block_for_io; ++ struct page___2 *cur_page; ++ unsigned int cur_page_offset; ++ unsigned int cur_page_len; ++ sector_t cur_page_block; ++ loff_t cur_page_fs_offset; ++ struct iov_iter *iter; ++ unsigned int head; ++ unsigned int tail; ++ size_t from; ++ size_t to; ++}; ++ ++struct dio { ++ int flags; ++ int op; ++ int op_flags; ++ blk_qc_t bio_cookie; ++ struct gendisk *bio_disk; ++ struct inode___2 *inode; ++ loff_t i_size; ++ dio_iodone_t *end_io; ++ void *private; ++ spinlock_t bio_lock; ++ int page_errors; ++ int is_async; ++ bool defer_completion; ++ bool should_dirty; ++ int io_error; ++ long unsigned int refcount; ++ struct bio *bio_list; ++ struct task_struct___2 *waiter; ++ struct kiocb *iocb; ++ ssize_t result; ++ union { ++ struct page___2 *pages[64]; ++ struct work_struct complete_work; ++ }; ++ long: 64; ++}; ++ ++struct mpage_readpage_args { ++ struct bio *bio; ++ struct page *page; ++ unsigned int nr_pages; ++ bool is_readahead; ++ sector_t last_block_in_bio; ++ struct buffer_head map_bh; ++ long unsigned int first_logical_block; ++ get_block_t *get_block; ++}; ++ ++struct mpage_data { ++ struct bio *bio; ++ sector_t last_block_in_bio; ++ get_block_t *get_block; ++ unsigned int use_writepage; ++}; ++ ++typedef u32 nlink_t; ++ ++typedef int (*proc_write_t)(struct file___2 *, char *, size_t); ++ ++struct proc_dir_entry { ++ atomic_t in_use; ++ refcount_t refcnt; ++ struct list_head pde_openers; ++ spinlock_t pde_unload_lock; ++ struct completion *pde_unload_completion; ++ const struct inode_operations___2 *proc_iops; ++ const struct file_operations___2 *proc_fops; ++ const struct dentry_operations *proc_dops; ++ union { ++ const struct seq_operations___2 *seq_ops; ++ int (*single_show)(struct seq_file___2 *, void *); ++ }; ++ proc_write_t write; ++ void *data; ++ unsigned int state_size; ++ unsigned int low_ino; ++ nlink_t nlink; ++ kuid_t uid; ++ kgid_t gid; ++ loff_t size; ++ struct proc_dir_entry *parent; ++ struct rb_root subdir; ++ struct rb_node subdir_node; ++ char *name; ++ umode_t mode; ++ u8 namelen; ++ char inline_name[0]; ++}; ++ ++union proc_op { ++ int (*proc_get_link)(struct dentry___2 *, struct path___2 *); ++ int (*proc_show)(struct seq_file___2 *, struct pid_namespace *, struct pid *, struct task_struct___2 *); ++}; ++ ++struct proc_inode { ++ struct pid *pid; ++ unsigned int fd; ++ union proc_op op; ++ struct proc_dir_entry *pde; ++ struct ctl_table_header *sysctl; ++ struct ctl_table *sysctl_entry; ++ struct hlist_node sysctl_inodes; ++ const struct proc_ns_operations *ns_ops; ++ struct inode___2 vfs_inode; ++}; ++ ++struct proc_fs_info { ++ int flag; ++ const char *str; ++}; ++ ++struct file_handle { ++ 
__u32 handle_bytes; ++ int handle_type; ++ unsigned char f_handle[0]; ++}; ++ ++struct inotify_inode_mark { ++ struct fsnotify_mark fsn_mark; ++ int wd; ++}; ++ ++struct dnotify_struct { ++ struct dnotify_struct *dn_next; ++ __u32 dn_mask; ++ int dn_fd; ++ struct file___2 *dn_filp; ++ fl_owner_t dn_owner; ++}; ++ ++struct dnotify_mark { ++ struct fsnotify_mark fsn_mark; ++ struct dnotify_struct *dn; ++}; ++ ++struct inotify_event_info { ++ struct fsnotify_event fse; ++ int wd; ++ u32 sync_cookie; ++ int name_len; ++ char name[0]; ++}; ++ ++struct inotify_event { ++ __s32 wd; ++ __u32 mask; ++ __u32 cookie; ++ __u32 len; ++ char name[0]; ++}; ++ ++typedef int (*dev_page_fault_t___3)(struct vm_area_struct___2 *, long unsigned int, const struct page *, unsigned int, pmd_t *); ++ ++struct fanotify_event_info { ++ struct fsnotify_event fse; ++ struct path___2 path; ++ struct pid___2 *tgid; ++}; ++ ++struct fanotify_perm_event_info { ++ struct fanotify_event_info fae; ++ int response; ++ int fd; ++}; ++ ++struct fanotify_event_metadata { ++ __u32 event_len; ++ __u8 vers; ++ __u8 reserved; ++ __u16 metadata_len; ++ __u64 mask; ++ __s32 fd; ++ __s32 pid; ++}; ++ ++struct fanotify_response { ++ __s32 fd; ++ __u32 response; ++}; ++ ++struct epoll_event { ++ __poll_t events; ++ __u64 data; ++}; ++ ++struct epoll_filefd { ++ struct file *file; ++ int fd; ++} __attribute__((packed)); ++ ++struct nested_call_node { ++ struct list_head llink; ++ void *cookie; ++ void *ctx; ++}; ++ ++struct nested_calls { ++ struct list_head tasks_call_list; ++ spinlock_t lock; ++}; ++ ++struct eventpoll; ++ ++struct epitem { ++ union { ++ struct rb_node rbn; ++ struct callback_head rcu; ++ }; ++ struct list_head rdllink; ++ struct epitem *next; ++ struct epoll_filefd ffd; ++ int nwait; ++ struct list_head pwqlist; ++ struct eventpoll *ep; ++ struct list_head fllink; ++ struct wakeup_source *ws; ++ struct epoll_event event; ++}; ++ ++struct eventpoll { ++ struct mutex mtx; ++ wait_queue_head_t wq; ++ wait_queue_head_t poll_wait; ++ struct list_head rdllist; ++ struct rb_root_cached rbr; ++ struct epitem *ovflist; ++ struct wakeup_source *ws; ++ struct user_struct *user; ++ struct file *file; ++ u64 gen; ++ unsigned int napi_id; ++}; ++ ++struct eppoll_entry { ++ struct list_head llink; ++ struct epitem *base; ++ wait_queue_entry_t wait; ++ wait_queue_head_t *whead; ++}; ++ ++struct ep_pqueue { ++ poll_table pt; ++ struct epitem *epi; ++}; ++ ++struct ep_send_events_data { ++ int maxevents; ++ struct epoll_event *events; ++ int res; ++}; ++ ++struct signalfd_siginfo { ++ __u32 ssi_signo; ++ __s32 ssi_errno; ++ __s32 ssi_code; ++ __u32 ssi_pid; ++ __u32 ssi_uid; ++ __s32 ssi_fd; ++ __u32 ssi_tid; ++ __u32 ssi_band; ++ __u32 ssi_overrun; ++ __u32 ssi_trapno; ++ __s32 ssi_status; ++ __s32 ssi_int; ++ __u64 ssi_ptr; ++ __u64 ssi_utime; ++ __u64 ssi_stime; ++ __u64 ssi_addr; ++ __u16 ssi_addr_lsb; ++ __u16 __pad2; ++ __s32 ssi_syscall; ++ __u64 ssi_call_addr; ++ __u32 ssi_arch; ++ __u8 __pad[28]; ++}; ++ ++struct signalfd_ctx { ++ sigset_t sigmask; ++}; ++ ++struct timerfd_ctx { ++ union { ++ struct hrtimer tmr; ++ struct alarm alarm; ++ } t; ++ ktime_t tintv; ++ ktime_t moffs; ++ wait_queue_head_t wqh; ++ u64 ticks; ++ int clockid; ++ short unsigned int expired; ++ short unsigned int settime_flags; ++ struct callback_head rcu; ++ struct list_head clist; ++ spinlock_t cancel_lock; ++ bool might_cancel; ++}; ++ ++struct eventfd_ctx___2 { ++ struct kref kref; ++ wait_queue_head_t wqh; ++ __u64 count; ++ unsigned int flags; ++}; 
++ ++enum userfaultfd_state { ++ UFFD_STATE_WAIT_API = 0, ++ UFFD_STATE_RUNNING = 1, ++}; ++ ++struct userfaultfd_ctx { ++ wait_queue_head_t fault_pending_wqh; ++ wait_queue_head_t fault_wqh; ++ wait_queue_head_t fd_wqh; ++ wait_queue_head_t event_wqh; ++ struct seqcount refile_seq; ++ atomic_t refcount; ++ unsigned int flags; ++ unsigned int features; ++ enum userfaultfd_state state; ++ bool released; ++ bool mmap_changing; ++ struct mm_struct___2 *mm; ++}; ++ ++struct uffd_msg { ++ __u8 event; ++ __u8 reserved1; ++ __u16 reserved2; ++ __u32 reserved3; ++ union { ++ struct { ++ __u64 flags; ++ __u64 address; ++ union { ++ __u32 ptid; ++ } feat; ++ } pagefault; ++ struct { ++ __u32 ufd; ++ } fork; ++ struct { ++ __u64 from; ++ __u64 to; ++ __u64 len; ++ } remap; ++ struct { ++ __u64 start; ++ __u64 end; ++ } remove; ++ struct { ++ __u64 reserved1; ++ __u64 reserved2; ++ __u64 reserved3; ++ } reserved; ++ } arg; ++}; ++ ++struct uffdio_api { ++ __u64 api; ++ __u64 features; ++ __u64 ioctls; ++}; ++ ++struct uffdio_range { ++ __u64 start; ++ __u64 len; ++}; ++ ++struct uffdio_register { ++ struct uffdio_range range; ++ __u64 mode; ++ __u64 ioctls; ++}; ++ ++struct uffdio_copy { ++ __u64 dst; ++ __u64 src; ++ __u64 len; ++ __u64 mode; ++ __s64 copy; ++}; ++ ++struct uffdio_zeropage { ++ struct uffdio_range range; ++ __u64 mode; ++ __s64 zeropage; ++}; ++ ++struct userfaultfd_fork_ctx { ++ struct userfaultfd_ctx *orig; ++ struct userfaultfd_ctx *new; ++ struct list_head list; ++}; ++ ++struct userfaultfd_unmap_ctx { ++ struct userfaultfd_ctx *ctx; ++ long unsigned int start; ++ long unsigned int end; ++ struct list_head list; ++}; ++ ++struct userfaultfd_wait_queue { ++ struct uffd_msg msg; ++ wait_queue_entry_t wq; ++ struct userfaultfd_ctx *ctx; ++ bool waken; ++}; ++ ++struct userfaultfd_wake_range { ++ long unsigned int start; ++ long unsigned int len; ++}; ++ ++struct kioctx; ++ ++struct kioctx_table { ++ struct callback_head rcu; ++ unsigned int nr; ++ struct kioctx *table[0]; ++}; ++ ++typedef __kernel_ulong_t aio_context_t; ++ ++enum { ++ IOCB_CMD_PREAD = 0, ++ IOCB_CMD_PWRITE = 1, ++ IOCB_CMD_FSYNC = 2, ++ IOCB_CMD_FDSYNC = 3, ++ IOCB_CMD_POLL = 5, ++ IOCB_CMD_NOOP = 6, ++ IOCB_CMD_PREADV = 7, ++ IOCB_CMD_PWRITEV = 8, ++}; ++ ++struct io_event { ++ __u64 data; ++ __u64 obj; ++ __s64 res; ++ __s64 res2; ++}; ++ ++struct iocb { ++ __u64 aio_data; ++ __u32 aio_key; ++ __kernel_rwf_t aio_rw_flags; ++ __u16 aio_lio_opcode; ++ __s16 aio_reqprio; ++ __u32 aio_fildes; ++ __u64 aio_buf; ++ __u64 aio_nbytes; ++ __s64 aio_offset; ++ __u64 aio_reserved2; ++ __u32 aio_flags; ++ __u32 aio_resfd; ++}; ++ ++typedef compat_ulong_t compat_aio_context_t; ++ ++typedef int kiocb_cancel_fn(struct kiocb *); ++ ++struct aio_ring { ++ unsigned int id; ++ unsigned int nr; ++ unsigned int head; ++ unsigned int tail; ++ unsigned int magic; ++ unsigned int compat_features; ++ unsigned int incompat_features; ++ unsigned int header_length; ++ struct io_event io_events[0]; ++}; ++ ++struct kioctx_cpu; ++ ++struct ctx_rq_wait; ++ ++struct kioctx { ++ struct percpu_ref users; ++ atomic_t dead; ++ struct percpu_ref reqs; ++ long unsigned int user_id; ++ struct kioctx_cpu *cpu; ++ unsigned int req_batch; ++ unsigned int max_reqs; ++ unsigned int nr_events; ++ long unsigned int mmap_base; ++ long unsigned int mmap_size; ++ struct page **ring_pages; ++ long int nr_pages; ++ struct rcu_work free_rwork; ++ struct ctx_rq_wait *rq_wait; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct { ++ atomic_t 
reqs_available; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ }; ++ struct { ++ spinlock_t ctx_lock; ++ struct list_head active_reqs; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ }; ++ struct { ++ struct mutex ring_lock; ++ wait_queue_head_t wait; ++ long: 64; ++ }; ++ struct { ++ unsigned int tail; ++ unsigned int completed_events; ++ spinlock_t completion_lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ }; ++ struct page *internal_pages[8]; ++ struct file___2 *aio_ring_file; ++ unsigned int id; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct kioctx_cpu { ++ unsigned int reqs_available; ++}; ++ ++struct ctx_rq_wait { ++ struct completion comp; ++ atomic_t count; ++}; ++ ++struct fsync_iocb { ++ struct file___2 *file; ++ struct work_struct work; ++ bool datasync; ++ struct cred *creds; ++}; ++ ++struct poll_iocb { ++ struct file___2 *file; ++ struct wait_queue_head *head; ++ __poll_t events; ++ bool done; ++ bool cancelled; ++ struct wait_queue_entry wait; ++ struct work_struct work; ++}; ++ ++struct aio_kiocb { ++ union { ++ struct file___2 *ki_filp; ++ struct kiocb rw; ++ struct fsync_iocb fsync; ++ struct poll_iocb poll; ++ }; ++ struct kioctx *ki_ctx; ++ kiocb_cancel_fn *ki_cancel; ++ struct io_event ki_res; ++ struct list_head ki_list; ++ refcount_t ki_refcnt; ++ struct eventfd_ctx *ki_eventfd; ++}; ++ ++struct aio_poll_table { ++ struct poll_table_struct pt; ++ struct aio_kiocb *iocb; ++ int error; ++}; ++ ++struct __aio_sigset { ++ const sigset_t *sigmask; ++ size_t sigsetsize; ++}; ++ ++struct __compat_aio_sigset { ++ compat_sigset_t *sigmask; ++ compat_size_t sigsetsize; ++}; ++ ++struct iomap_ops { ++ int (*iomap_begin)(struct inode___2 *, loff_t, loff_t, unsigned int, struct iomap___2 *); ++ int (*iomap_end)(struct inode___2 *, loff_t, loff_t, ssize_t, unsigned int, struct iomap___2 *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct trace_event_raw_dax_pmd_fault_class { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int vm_start; ++ long unsigned int vm_end; ++ long unsigned int vm_flags; ++ long unsigned int address; ++ long unsigned int pgoff; ++ long unsigned int max_pgoff; ++ dev_t dev; ++ unsigned int flags; ++ int result; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_pmd_load_hole_class { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int vm_flags; ++ long unsigned int address; ++ struct page *zero_page; ++ void *radix_entry; ++ dev_t dev; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_pmd_insert_mapping_class { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int vm_flags; ++ long unsigned int address; ++ long int length; ++ u64 pfn_val; ++ void *radix_entry; ++ dev_t dev; ++ int write; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_pte_fault_class { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int vm_flags; ++ long unsigned int address; ++ long unsigned int pgoff; ++ dev_t dev; ++ unsigned int flags; ++ int result; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_insert_mapping { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int vm_flags; ++ long unsigned int address; ++ void *radix_entry; ++ dev_t dev; ++ int 
write; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_writeback_range_class { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int start_index; ++ long unsigned int end_index; ++ dev_t dev; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dax_writeback_one { ++ struct trace_entry ent; ++ long unsigned int ino; ++ long unsigned int pgoff; ++ long unsigned int pglen; ++ dev_t dev; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_dax_pmd_fault_class {}; ++ ++struct trace_event_data_offsets_dax_pmd_load_hole_class {}; ++ ++struct trace_event_data_offsets_dax_pmd_insert_mapping_class {}; ++ ++struct trace_event_data_offsets_dax_pte_fault_class {}; ++ ++struct trace_event_data_offsets_dax_insert_mapping {}; ++ ++struct trace_event_data_offsets_dax_writeback_range_class {}; ++ ++struct trace_event_data_offsets_dax_writeback_one {}; ++ ++struct exceptional_entry_key { ++ struct address_space *mapping; ++ long unsigned int entry_start; ++}; ++ ++struct wait_exceptional_entry_queue { ++ wait_queue_entry_t wait; ++ struct exceptional_entry_key key; ++}; ++ ++struct flock64 { ++ short int l_type; ++ short int l_whence; ++ __kernel_loff_t l_start; ++ __kernel_loff_t l_len; ++ __kernel_pid_t l_pid; ++}; ++ ++struct trace_event_raw_locks_get_lock_context { ++ struct trace_entry ent; ++ long unsigned int i_ino; ++ dev_t s_dev; ++ unsigned char type; ++ struct file_lock_context *ctx; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_filelock_lock { ++ struct trace_entry ent; ++ struct file_lock *fl; ++ long unsigned int i_ino; ++ dev_t s_dev; ++ struct file_lock *fl_next; ++ fl_owner_t fl_owner; ++ unsigned int fl_pid; ++ unsigned int fl_flags; ++ unsigned char fl_type; ++ loff_t fl_start; ++ loff_t fl_end; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_filelock_lease { ++ struct trace_entry ent; ++ struct file_lock *fl; ++ long unsigned int i_ino; ++ dev_t s_dev; ++ struct file_lock *fl_next; ++ fl_owner_t fl_owner; ++ unsigned int fl_flags; ++ unsigned char fl_type; ++ long unsigned int fl_break_time; ++ long unsigned int fl_downgrade_time; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_generic_add_lease { ++ struct trace_entry ent; ++ long unsigned int i_ino; ++ int wcount; ++ int dcount; ++ int icount; ++ dev_t s_dev; ++ fl_owner_t fl_owner; ++ unsigned int fl_flags; ++ unsigned char fl_type; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_locks_get_lock_context {}; ++ ++struct trace_event_data_offsets_filelock_lock {}; ++ ++struct trace_event_data_offsets_filelock_lease {}; ++ ++struct trace_event_data_offsets_generic_add_lease {}; ++ ++struct file_lock_list_struct { ++ spinlock_t lock; ++ struct hlist_head hlist; ++}; ++ ++struct locks_iterator { ++ int li_cpu; ++ loff_t li_pos; ++}; ++ ++struct nfs_string { ++ unsigned int len; ++ const char *data; ++}; ++ ++struct nfs4_mount_data { ++ int version; ++ int flags; ++ int rsize; ++ int wsize; ++ int timeo; ++ int retrans; ++ int acregmin; ++ int acregmax; ++ int acdirmin; ++ int acdirmax; ++ struct nfs_string client_addr; ++ struct nfs_string mnt_path; ++ struct nfs_string hostname; ++ unsigned int host_addrlen; ++ struct sockaddr *host_addr; ++ int proto; ++ int auth_flavourlen; ++ int *auth_flavours; ++}; ++ ++struct compat_nfs_string { ++ compat_uint_t len; ++ compat_uptr_t data; ++}; ++ ++struct compat_nfs4_mount_data_v1 { ++ compat_int_t version; ++ compat_int_t flags; ++ compat_int_t rsize; ++ compat_int_t wsize; ++ compat_int_t timeo; ++ compat_int_t retrans; ++ 
compat_int_t acregmin; ++ compat_int_t acregmax; ++ compat_int_t acdirmin; ++ compat_int_t acdirmax; ++ struct compat_nfs_string client_addr; ++ struct compat_nfs_string mnt_path; ++ struct compat_nfs_string hostname; ++ compat_uint_t host_addrlen; ++ compat_uptr_t host_addr; ++ compat_int_t proto; ++ compat_int_t auth_flavourlen; ++ compat_uptr_t auth_flavours; ++}; ++ ++typedef u32 compat_caddr_t; ++ ++typedef int br_should_route_hook_t(struct sk_buff *); ++ ++struct ppp_idle { ++ __kernel_time_t xmit_idle; ++ __kernel_time_t recv_idle; ++}; ++ ++struct ppp_option_data { ++ __u8 *ptr; ++ __u32 length; ++ int transmit; ++}; ++ ++struct mtget { ++ long int mt_type; ++ long int mt_resid; ++ long int mt_dsreg; ++ long int mt_gstat; ++ long int mt_erreg; ++ __kernel_daddr_t mt_fileno; ++ __kernel_daddr_t mt_blkno; ++}; ++ ++struct mtpos { ++ long int mt_blkno; ++}; ++ ++struct fb_fix_screeninfo { ++ char id[16]; ++ long unsigned int smem_start; ++ __u32 smem_len; ++ __u32 type; ++ __u32 type_aux; ++ __u32 visual; ++ __u16 xpanstep; ++ __u16 ypanstep; ++ __u16 ywrapstep; ++ __u32 line_length; ++ long unsigned int mmio_start; ++ __u32 mmio_len; ++ __u32 accel; ++ __u16 capabilities; ++ __u16 reserved[2]; ++}; ++ ++struct fb_bitfield { ++ __u32 offset; ++ __u32 length; ++ __u32 msb_right; ++}; ++ ++struct fb_var_screeninfo { ++ __u32 xres; ++ __u32 yres; ++ __u32 xres_virtual; ++ __u32 yres_virtual; ++ __u32 xoffset; ++ __u32 yoffset; ++ __u32 bits_per_pixel; ++ __u32 grayscale; ++ struct fb_bitfield red; ++ struct fb_bitfield green; ++ struct fb_bitfield blue; ++ struct fb_bitfield transp; ++ __u32 nonstd; ++ __u32 activate; ++ __u32 height; ++ __u32 width; ++ __u32 accel_flags; ++ __u32 pixclock; ++ __u32 left_margin; ++ __u32 right_margin; ++ __u32 upper_margin; ++ __u32 lower_margin; ++ __u32 hsync_len; ++ __u32 vsync_len; ++ __u32 sync; ++ __u32 vmode; ++ __u32 rotate; ++ __u32 colorspace; ++ __u32 reserved[4]; ++}; ++ ++struct fb_cmap { ++ __u32 start; ++ __u32 len; ++ __u16 *red; ++ __u16 *green; ++ __u16 *blue; ++ __u16 *transp; ++}; ++ ++struct fb_copyarea { ++ __u32 dx; ++ __u32 dy; ++ __u32 width; ++ __u32 height; ++ __u32 sx; ++ __u32 sy; ++}; ++ ++struct fb_fillrect { ++ __u32 dx; ++ __u32 dy; ++ __u32 width; ++ __u32 height; ++ __u32 color; ++ __u32 rop; ++}; ++ ++struct fb_image { ++ __u32 dx; ++ __u32 dy; ++ __u32 width; ++ __u32 height; ++ __u32 fg_color; ++ __u32 bg_color; ++ __u8 depth; ++ const char *data; ++ struct fb_cmap cmap; ++}; ++ ++struct fbcurpos { ++ __u16 x; ++ __u16 y; ++}; ++ ++struct fb_cursor { ++ __u16 set; ++ __u16 enable; ++ __u16 rop; ++ const char *mask; ++ struct fbcurpos hot; ++ struct fb_image image; ++}; ++ ++enum backlight_type { ++ BACKLIGHT_RAW = 1, ++ BACKLIGHT_PLATFORM = 2, ++ BACKLIGHT_FIRMWARE = 3, ++ BACKLIGHT_TYPE_MAX = 4, ++}; ++ ++struct backlight_device; ++ ++struct fb_info; ++ ++struct backlight_ops { ++ unsigned int options; ++ int (*update_status)(struct backlight_device *); ++ int (*get_brightness)(struct backlight_device *); ++ int (*check_fb)(struct backlight_device *, struct fb_info *); ++}; ++ ++struct backlight_properties { ++ int brightness; ++ int max_brightness; ++ int power; ++ int fb_blank; ++ enum backlight_type type; ++ unsigned int state; ++}; ++ ++struct backlight_device { ++ struct backlight_properties props; ++ struct mutex update_lock; ++ struct mutex ops_lock; ++ const struct backlight_ops *ops; ++ struct notifier_block fb_notif; ++ struct list_head entry; ++ struct device dev; ++ bool fb_bl_on[32]; ++ int use_count; 
++}; ++ ++struct fb_chroma { ++ __u32 redx; ++ __u32 greenx; ++ __u32 bluex; ++ __u32 whitex; ++ __u32 redy; ++ __u32 greeny; ++ __u32 bluey; ++ __u32 whitey; ++}; ++ ++struct fb_videomode; ++ ++struct fb_monspecs { ++ struct fb_chroma chroma; ++ struct fb_videomode *modedb; ++ __u8 manufacturer[4]; ++ __u8 monitor[14]; ++ __u8 serial_no[14]; ++ __u8 ascii[14]; ++ __u32 modedb_len; ++ __u32 model; ++ __u32 serial; ++ __u32 year; ++ __u32 week; ++ __u32 hfmin; ++ __u32 hfmax; ++ __u32 dclkmin; ++ __u32 dclkmax; ++ __u16 input; ++ __u16 dpms; ++ __u16 signal; ++ __u16 vfmin; ++ __u16 vfmax; ++ __u16 gamma; ++ __u16 gtf: 1; ++ __u16 misc; ++ __u8 version; ++ __u8 revision; ++ __u8 max_x; ++ __u8 max_y; ++}; ++ ++struct fb_pixmap { ++ u8 *addr; ++ u32 size; ++ u32 offset; ++ u32 buf_align; ++ u32 scan_align; ++ u32 access_align; ++ u32 flags; ++ u32 blit_x; ++ u32 blit_y; ++ void (*writeio)(struct fb_info *, void *, void *, unsigned int); ++ void (*readio)(struct fb_info *, void *, void *, unsigned int); ++}; ++ ++struct fb_deferred_io; ++ ++struct fb_ops; ++ ++struct fb_tile_ops; ++ ++struct apertures_struct; ++ ++struct fb_info { ++ atomic_t count; ++ int node; ++ int flags; ++ int fbcon_rotate_hint; ++ struct mutex lock; ++ struct mutex mm_lock; ++ struct fb_var_screeninfo var; ++ struct fb_fix_screeninfo fix; ++ struct fb_monspecs monspecs; ++ struct work_struct queue; ++ struct fb_pixmap pixmap; ++ struct fb_pixmap sprite; ++ struct fb_cmap cmap; ++ struct list_head modelist; ++ struct fb_videomode *mode; ++ struct backlight_device *bl_dev; ++ struct mutex bl_curve_mutex; ++ u8 bl_curve[128]; ++ struct delayed_work deferred_work; ++ struct fb_deferred_io *fbdefio; ++ struct fb_ops *fbops; ++ struct device *device; ++ struct device *dev; ++ int class_flag; ++ struct fb_tile_ops *tileops; ++ union { ++ char *screen_base; ++ char *screen_buffer; ++ }; ++ long unsigned int screen_size; ++ void *pseudo_palette; ++ u32 state; ++ void *fbcon_par; ++ void *par; ++ struct apertures_struct *apertures; ++ bool skip_vt_switch; ++}; ++ ++struct fb_videomode { ++ const char *name; ++ u32 refresh; ++ u32 xres; ++ u32 yres; ++ u32 pixclock; ++ u32 left_margin; ++ u32 right_margin; ++ u32 upper_margin; ++ u32 lower_margin; ++ u32 hsync_len; ++ u32 vsync_len; ++ u32 sync; ++ u32 vmode; ++ u32 flag; ++}; ++ ++struct fb_blit_caps { ++ u32 x; ++ u32 y; ++ u32 len; ++ u32 flags; ++}; ++ ++struct fb_deferred_io { ++ long unsigned int delay; ++ struct mutex lock; ++ struct list_head pagelist; ++ void (*first_io)(struct fb_info *); ++ void (*deferred_io)(struct fb_info *, struct list_head *); ++}; ++ ++struct fb_ops { ++ struct module___2 *owner; ++ int (*fb_open)(struct fb_info *, int); ++ int (*fb_release)(struct fb_info *, int); ++ ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); ++ ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); ++ int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); ++ int (*fb_set_par)(struct fb_info *); ++ int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); ++ int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); ++ int (*fb_blank)(int, struct fb_info *); ++ int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); ++ void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); ++ void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); ++ void (*fb_imageblit)(struct fb_info *, const struct fb_image *); ++ int (*fb_cursor)(struct fb_info *, struct 
fb_cursor *); ++ int (*fb_sync)(struct fb_info *); ++ int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); ++ int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); ++ int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); ++ void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); ++ void (*fb_destroy)(struct fb_info *); ++ int (*fb_debug_enter)(struct fb_info *); ++ int (*fb_debug_leave)(struct fb_info *); ++}; ++ ++struct fb_tilemap { ++ __u32 width; ++ __u32 height; ++ __u32 depth; ++ __u32 length; ++ const __u8 *data; ++}; ++ ++struct fb_tilerect { ++ __u32 sx; ++ __u32 sy; ++ __u32 width; ++ __u32 height; ++ __u32 index; ++ __u32 fg; ++ __u32 bg; ++ __u32 rop; ++}; ++ ++struct fb_tilearea { ++ __u32 sx; ++ __u32 sy; ++ __u32 dx; ++ __u32 dy; ++ __u32 width; ++ __u32 height; ++}; ++ ++struct fb_tileblit { ++ __u32 sx; ++ __u32 sy; ++ __u32 width; ++ __u32 height; ++ __u32 fg; ++ __u32 bg; ++ __u32 length; ++ __u32 *indices; ++}; ++ ++struct fb_tilecursor { ++ __u32 sx; ++ __u32 sy; ++ __u32 mode; ++ __u32 shape; ++ __u32 fg; ++ __u32 bg; ++}; ++ ++struct fb_tile_ops { ++ void (*fb_settile)(struct fb_info *, struct fb_tilemap *); ++ void (*fb_tilecopy)(struct fb_info *, struct fb_tilearea *); ++ void (*fb_tilefill)(struct fb_info *, struct fb_tilerect *); ++ void (*fb_tileblit)(struct fb_info *, struct fb_tileblit *); ++ void (*fb_tilecursor)(struct fb_info *, struct fb_tilecursor *); ++ int (*fb_get_tilemax)(struct fb_info *); ++}; ++ ++struct aperture { ++ resource_size_t base; ++ resource_size_t size; ++}; ++ ++struct apertures_struct { ++ unsigned int count; ++ struct aperture ranges[0]; ++}; ++ ++struct dmt_videomode { ++ u32 dmt_id; ++ u32 std_2byte_code; ++ u32 cvt_3byte_code; ++ const struct fb_videomode *mode; ++}; ++ ++enum v4l2_preemphasis { ++ V4L2_PREEMPHASIS_DISABLED = 0, ++ V4L2_PREEMPHASIS_50_uS = 1, ++ V4L2_PREEMPHASIS_75_uS = 2, ++}; ++ ++struct atalk_addr { ++ __be16 s_net; ++ __u8 s_node; ++}; ++ ++struct atalk_netrange { ++ __u8 nr_phase; ++ __be16 nr_firstnet; ++ __be16 nr_lastnet; ++}; ++ ++struct atalk_route { ++ struct net_device *dev; ++ struct atalk_addr target; ++ struct atalk_addr gateway; ++ int flags; ++ struct atalk_route *next; ++}; ++ ++struct atalk_iface { ++ struct net_device *dev; ++ struct atalk_addr address; ++ int status; ++ struct atalk_netrange nets; ++ struct atalk_iface *next; ++}; ++ ++struct datalink_proto; ++ ++struct sg_iovec { ++ void *iov_base; ++ size_t iov_len; ++}; ++ ++typedef struct sg_iovec sg_iovec_t; ++ ++struct sg_io_hdr { ++ int interface_id; ++ int dxfer_direction; ++ unsigned char cmd_len; ++ unsigned char mx_sb_len; ++ short unsigned int iovec_count; ++ unsigned int dxfer_len; ++ void *dxferp; ++ unsigned char *cmdp; ++ void *sbp; ++ unsigned int timeout; ++ unsigned int flags; ++ int pack_id; ++ void *usr_ptr; ++ unsigned char status; ++ unsigned char masked_status; ++ unsigned char msg_status; ++ unsigned char sb_len_wr; ++ short unsigned int host_status; ++ short unsigned int driver_status; ++ int resid; ++ unsigned int duration; ++ unsigned int info; ++}; ++ ++typedef struct sg_io_hdr sg_io_hdr_t; ++ ++struct sg_req_info { ++ char req_state; ++ char orphan; ++ char sg_io_owned; ++ char problem; ++ int pack_id; ++ void *usr_ptr; ++ unsigned int duration; ++ int unused; ++}; ++ ++typedef struct sg_req_info sg_req_info_t; ++ ++struct atm_blli { ++ unsigned char l2_proto; ++ union { ++ struct { ++ unsigned char mode; ++ unsigned char window; ++ } itu; 
++ unsigned char user; ++ } l2; ++ unsigned char l3_proto; ++ union { ++ struct { ++ unsigned char mode; ++ unsigned char def_size; ++ unsigned char window; ++ } itu; ++ unsigned char user; ++ struct { ++ unsigned char term_type; ++ unsigned char fw_mpx_cap; ++ unsigned char bw_mpx_cap; ++ } h310; ++ struct { ++ unsigned char ipi; ++ unsigned char snap[5]; ++ } tr9577; ++ } l3; ++}; ++ ++struct atm_bhli { ++ unsigned char hl_type; ++ unsigned char hl_length; ++ unsigned char hl_info[8]; ++}; ++ ++struct atm_sap { ++ struct atm_bhli bhli; ++ struct atm_blli blli[3]; ++}; ++ ++struct atm_trafprm { ++ unsigned char traffic_class; ++ int max_pcr; ++ int pcr; ++ int min_pcr; ++ int max_cdv; ++ int max_sdu; ++ unsigned int icr; ++ unsigned int tbe; ++ unsigned int frtt: 24; ++ unsigned int rif: 4; ++ unsigned int rdf: 4; ++ unsigned int nrm_pres: 1; ++ unsigned int trm_pres: 1; ++ unsigned int adtf_pres: 1; ++ unsigned int cdf_pres: 1; ++ unsigned int nrm: 3; ++ unsigned int trm: 3; ++ unsigned int adtf: 10; ++ unsigned int cdf: 3; ++ unsigned int spare: 9; ++}; ++ ++struct atm_qos { ++ struct atm_trafprm txtp; ++ struct atm_trafprm rxtp; ++ unsigned char aal; ++}; ++ ++struct sockaddr_atmsvc { ++ short unsigned int sas_family; ++ struct { ++ unsigned char prv[20]; ++ char pub[13]; ++ char lij_type; ++ __u32 lij_id; ++ } sas_addr; ++}; ++ ++struct atm_cirange { ++ signed char vpi_bits; ++ signed char vci_bits; ++}; ++ ++struct k_atm_aal_stats { ++ atomic_t tx; ++ atomic_t tx_err; ++ atomic_t rx; ++ atomic_t rx_err; ++ atomic_t rx_drop; ++}; ++ ++struct k_atm_dev_stats { ++ struct k_atm_aal_stats aal0; ++ struct k_atm_aal_stats aal34; ++ struct k_atm_aal_stats aal5; ++}; ++ ++struct atm_dev; ++ ++struct atm_vcc { ++ struct sock sk; ++ long unsigned int flags; ++ short int vpi; ++ int vci; ++ long unsigned int aal_options; ++ long unsigned int atm_options; ++ struct atm_dev *dev; ++ struct atm_qos qos; ++ struct atm_sap sap; ++ void (*release_cb)(struct atm_vcc *); ++ void (*push)(struct atm_vcc *, struct sk_buff *); ++ void (*pop)(struct atm_vcc *, struct sk_buff *); ++ int (*push_oam)(struct atm_vcc *, void *); ++ int (*send)(struct atm_vcc *, struct sk_buff *); ++ void *dev_data; ++ void *proto_data; ++ struct k_atm_aal_stats *stats; ++ struct module___2 *owner; ++ short int itf; ++ struct sockaddr_atmsvc local; ++ struct sockaddr_atmsvc remote; ++ struct atm_vcc *session; ++ void *user_back; ++}; ++ ++struct atmdev_ops; ++ ++struct atmphy_ops; ++ ++struct atm_dev { ++ const struct atmdev_ops *ops; ++ const struct atmphy_ops *phy; ++ const char *type; ++ int number; ++ void *dev_data; ++ void *phy_data; ++ long unsigned int flags; ++ struct list_head local; ++ struct list_head lecs; ++ unsigned char esi[6]; ++ struct atm_cirange ci_range; ++ struct k_atm_dev_stats stats; ++ char signal; ++ int link_rate; ++ refcount_t refcnt; ++ spinlock_t lock; ++ struct proc_dir_entry *proc_entry; ++ char *proc_name; ++ struct device class_dev; ++ struct list_head dev_list; ++}; ++ ++struct atmdev_ops { ++ void (*dev_close)(struct atm_dev *); ++ int (*open)(struct atm_vcc *); ++ void (*close)(struct atm_vcc *); ++ int (*ioctl)(struct atm_dev *, unsigned int, void *); ++ int (*compat_ioctl)(struct atm_dev *, unsigned int, void *); ++ int (*getsockopt)(struct atm_vcc *, int, int, void *, int); ++ int (*setsockopt)(struct atm_vcc *, int, int, void *, unsigned int); ++ int (*send)(struct atm_vcc *, struct sk_buff *); ++ int (*send_oam)(struct atm_vcc *, void *, int); ++ void (*phy_put)(struct atm_dev *, unsigned 
char, long unsigned int); ++ unsigned char (*phy_get)(struct atm_dev *, long unsigned int); ++ int (*change_qos)(struct atm_vcc *, struct atm_qos *, int); ++ int (*proc_read)(struct atm_dev *, loff_t *, char *); ++ struct module___2 *owner; ++}; ++ ++struct atmphy_ops { ++ int (*start)(struct atm_dev *); ++ int (*ioctl)(struct atm_dev *, unsigned int, void *); ++ void (*interrupt)(struct atm_dev *); ++ int (*stop)(struct atm_dev *); ++}; ++ ++struct atm_tcp_ops { ++ int (*attach)(struct atm_vcc *, int); ++ int (*create_persistent)(int); ++ int (*remove_persistent)(int); ++ struct module___2 *owner; ++}; ++ ++typedef enum { ++ VIDEO_FORMAT_4_3 = 0, ++ VIDEO_FORMAT_16_9 = 1, ++ VIDEO_FORMAT_221_1 = 2, ++} video_format_t; ++ ++typedef struct { ++ int w; ++ int h; ++ video_format_t aspect_ratio; ++} video_size_t; ++ ++struct video_event { ++ __s32 type; ++ long int timestamp; ++ union { ++ video_size_t size; ++ unsigned int frame_rate; ++ unsigned char vsync_field; ++ } u; ++}; ++ ++struct video_still_picture { ++ char *iFrame; ++ __s32 size; ++}; ++ ++struct compat_video_event { ++ int32_t type; ++ compat_time_t timestamp; ++ union { ++ video_size_t size; ++ unsigned int frame_rate; ++ } u; ++}; ++ ++struct compat_video_still_picture { ++ compat_uptr_t iFrame; ++ int32_t size; ++}; ++ ++struct sg_io_hdr32 { ++ compat_int_t interface_id; ++ compat_int_t dxfer_direction; ++ unsigned char cmd_len; ++ unsigned char mx_sb_len; ++ short unsigned int iovec_count; ++ compat_uint_t dxfer_len; ++ compat_uint_t dxferp; ++ compat_uptr_t cmdp; ++ compat_uptr_t sbp; ++ compat_uint_t timeout; ++ compat_uint_t flags; ++ compat_int_t pack_id; ++ compat_uptr_t usr_ptr; ++ unsigned char status; ++ unsigned char masked_status; ++ unsigned char msg_status; ++ unsigned char sb_len_wr; ++ short unsigned int host_status; ++ short unsigned int driver_status; ++ compat_int_t resid; ++ compat_uint_t duration; ++ compat_uint_t info; ++}; ++ ++typedef struct sg_io_hdr32 sg_io_hdr32_t; ++ ++struct sg_iovec32 { ++ compat_uint_t iov_base; ++ compat_uint_t iov_len; ++}; ++ ++typedef struct sg_iovec32 sg_iovec32_t; ++ ++struct compat_sg_req_info { ++ char req_state; ++ char orphan; ++ char sg_io_owned; ++ char problem; ++ int pack_id; ++ compat_uptr_t usr_ptr; ++ unsigned int duration; ++ int unused; ++}; ++ ++struct sock_fprog32 { ++ short unsigned int len; ++ compat_caddr_t filter; ++}; ++ ++struct ppp_option_data32 { ++ compat_caddr_t ptr; ++ u32 length; ++ compat_int_t transmit; ++}; ++ ++struct ppp_idle32 { ++ compat_time_t xmit_idle; ++ compat_time_t recv_idle; ++}; ++ ++struct mtget32 { ++ compat_long_t mt_type; ++ compat_long_t mt_resid; ++ compat_long_t mt_dsreg; ++ compat_long_t mt_gstat; ++ compat_long_t mt_erreg; ++ compat_daddr_t mt_fileno; ++ compat_daddr_t mt_blkno; ++}; ++ ++struct mtpos32 { ++ compat_long_t mt_blkno; ++}; ++ ++struct serial_struct32 { ++ compat_int_t type; ++ compat_int_t line; ++ compat_uint_t port; ++ compat_int_t irq; ++ compat_int_t flags; ++ compat_int_t xmit_fifo_size; ++ compat_int_t custom_divisor; ++ compat_int_t baud_base; ++ short unsigned int close_delay; ++ char io_type; ++ char reserved_char[1]; ++ compat_int_t hub6; ++ short unsigned int closing_wait; ++ short unsigned int closing_wait2; ++ compat_uint_t iomem_base; ++ short unsigned int iomem_reg_shift; ++ unsigned int port_high; ++ compat_int_t reserved[1]; ++}; ++ ++typedef unsigned int __kernel_uid_t; ++ ++typedef unsigned int __kernel_gid_t; ++ ++struct elf_prpsinfo { ++ char pr_state; ++ char pr_sname; ++ char pr_zomb; ++ 
char pr_nice; ++ long unsigned int pr_flag; ++ __kernel_uid_t pr_uid; ++ __kernel_gid_t pr_gid; ++ pid_t pr_pid; ++ pid_t pr_ppid; ++ pid_t pr_pgrp; ++ pid_t pr_sid; ++ char pr_fname[16]; ++ char pr_psargs[80]; ++}; ++ ++typedef int user_regset_active_fn___2(struct task_struct___2 *, const struct user_regset *); ++ ++typedef int user_regset_get_fn___2(struct task_struct___2 *, const struct user_regset *, unsigned int, unsigned int, void *, void *); ++ ++typedef int user_regset_set_fn___2(struct task_struct___2 *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); ++ ++typedef int user_regset_writeback_fn___2(struct task_struct___2 *, const struct user_regset *, int); ++ ++typedef unsigned int user_regset_get_size_fn___2(struct task_struct___2 *, const struct user_regset *); ++ ++struct elf_thread_core_info___2 { ++ struct elf_thread_core_info___2 *next; ++ struct task_struct___2 *task; ++ struct elf_prstatus prstatus; ++ struct memelfnote notes[0]; ++}; ++ ++struct elf_note_info___2 { ++ struct elf_thread_core_info___2 *thread; ++ struct memelfnote psinfo; ++ struct memelfnote signote; ++ struct memelfnote auxv; ++ struct memelfnote files; ++ siginfo_t csigdata; ++ size_t size; ++ int thread_notes; ++}; ++ ++struct posix_acl_xattr_entry { ++ __le16 e_tag; ++ __le16 e_perm; ++ __le32 e_id; ++}; ++ ++struct posix_acl_xattr_header { ++ __le32 a_version; ++}; ++ ++struct core_name { ++ char *corename; ++ int used; ++ int size; ++}; ++ ++struct files_cgroup { ++ struct cgroup_subsys_state css; ++ struct page_counter open_handles; ++}; ++ ++struct iomap_page { ++ atomic_t read_count; ++ atomic_t write_count; ++ spinlock_t uptodate_lock; ++ long unsigned int uptodate[2]; ++}; ++ ++typedef int iomap_dio_end_io_t(struct kiocb *, ssize_t, unsigned int); ++ ++typedef loff_t (*iomap_actor_t)(struct inode___2 *, loff_t, loff_t, void *, struct iomap___2 *); ++ ++struct iomap_readpage_ctx { ++ struct page *cur_page; ++ bool cur_page_in_bio; ++ bool is_readahead; ++ struct bio *bio; ++ struct list_head *pages; ++}; ++ ++struct fiemap_ctx { ++ struct fiemap_extent_info *fi; ++ struct iomap___2 prev; ++}; ++ ++struct iomap_dio { ++ struct kiocb *iocb; ++ iomap_dio_end_io_t *end_io; ++ loff_t i_size; ++ loff_t size; ++ atomic_t ref; ++ unsigned int flags; ++ int error; ++ bool wait_for_completion; ++ union { ++ struct { ++ struct iov_iter *iter; ++ struct task_struct *waiter; ++ struct request_queue *last_queue; ++ blk_qc_t cookie; ++ } submit; ++ struct { ++ struct work_struct work; ++ } aio; ++ }; ++}; ++ ++struct iomap_swapfile_info { ++ struct iomap___2 iomap; ++ struct swap_info_struct *sis; ++ uint64_t lowest_ppage; ++ uint64_t highest_ppage; ++ long unsigned int nr_pages; ++ int nr_extents; ++}; ++ ++enum { ++ QIF_BLIMITS_B = 0, ++ QIF_SPACE_B = 1, ++ QIF_ILIMITS_B = 2, ++ QIF_INODES_B = 3, ++ QIF_BTIME_B = 4, ++ QIF_ITIME_B = 5, ++}; ++ ++enum { ++ DQF_ROOT_SQUASH_B = 0, ++ DQF_SYS_FILE_B = 16, ++ DQF_PRIVATE = 17, ++}; ++ ++typedef __kernel_uid32_t qid_t; ++ ++enum { ++ DQF_INFO_DIRTY_B = 17, ++}; ++ ++enum { ++ DQST_LOOKUPS = 0, ++ DQST_DROPS = 1, ++ DQST_READS = 2, ++ DQST_WRITES = 3, ++ DQST_CACHE_HITS = 4, ++ DQST_ALLOC_DQUOTS = 5, ++ DQST_FREE_DQUOTS = 6, ++ DQST_SYNCS = 7, ++ _DQST_DQSTAT_LAST = 8, ++}; ++ ++enum { ++ _DQUOT_USAGE_ENABLED = 0, ++ _DQUOT_LIMITS_ENABLED = 1, ++ _DQUOT_SUSPENDED = 2, ++ _DQUOT_STATE_FLAGS = 3, ++}; ++ ++struct quota_module_name { ++ int qm_fmt_id; ++ char *qm_mod_name; ++}; ++ ++struct dquot_warn { ++ struct super_block___2 
*w_sb; ++ struct kqid w_dq_id; ++ short int w_type; ++}; ++ ++struct qtree_fmt_operations { ++ void (*mem2disk_dqblk)(void *, struct dquot___2 *); ++ void (*disk2mem_dqblk)(struct dquot___2 *, void *); ++ int (*is_id)(void *, struct dquot___2 *); ++}; ++ ++struct qtree_mem_dqinfo { ++ struct super_block___2 *dqi_sb; ++ int dqi_type; ++ unsigned int dqi_blocks; ++ unsigned int dqi_free_blk; ++ unsigned int dqi_free_entry; ++ unsigned int dqi_blocksize_bits; ++ unsigned int dqi_entry_size; ++ unsigned int dqi_usable_bs; ++ unsigned int dqi_qtree_depth; ++ const struct qtree_fmt_operations *dqi_ops; ++}; ++ ++struct v2_disk_dqheader { ++ __le32 dqh_magic; ++ __le32 dqh_version; ++}; ++ ++struct v2r0_disk_dqblk { ++ __le32 dqb_id; ++ __le32 dqb_ihardlimit; ++ __le32 dqb_isoftlimit; ++ __le32 dqb_curinodes; ++ __le32 dqb_bhardlimit; ++ __le32 dqb_bsoftlimit; ++ __le64 dqb_curspace; ++ __le64 dqb_btime; ++ __le64 dqb_itime; ++}; ++ ++struct v2r1_disk_dqblk { ++ __le32 dqb_id; ++ __le32 dqb_pad; ++ __le64 dqb_ihardlimit; ++ __le64 dqb_isoftlimit; ++ __le64 dqb_curinodes; ++ __le64 dqb_bhardlimit; ++ __le64 dqb_bsoftlimit; ++ __le64 dqb_curspace; ++ __le64 dqb_btime; ++ __le64 dqb_itime; ++}; ++ ++struct v2_disk_dqinfo { ++ __le32 dqi_bgrace; ++ __le32 dqi_igrace; ++ __le32 dqi_flags; ++ __le32 dqi_blocks; ++ __le32 dqi_free_blk; ++ __le32 dqi_free_entry; ++}; ++ ++struct qt_disk_dqdbheader { ++ __le32 dqdh_next_free; ++ __le32 dqdh_prev_free; ++ __le16 dqdh_entries; ++ __le16 dqdh_pad1; ++ __le32 dqdh_pad2; ++}; ++ ++struct fs_disk_quota { ++ __s8 d_version; ++ __s8 d_flags; ++ __u16 d_fieldmask; ++ __u32 d_id; ++ __u64 d_blk_hardlimit; ++ __u64 d_blk_softlimit; ++ __u64 d_ino_hardlimit; ++ __u64 d_ino_softlimit; ++ __u64 d_bcount; ++ __u64 d_icount; ++ __s32 d_itimer; ++ __s32 d_btimer; ++ __u16 d_iwarns; ++ __u16 d_bwarns; ++ __s32 d_padding2; ++ __u64 d_rtb_hardlimit; ++ __u64 d_rtb_softlimit; ++ __u64 d_rtbcount; ++ __s32 d_rtbtimer; ++ __u16 d_rtbwarns; ++ __s16 d_padding3; ++ char d_padding4[8]; ++}; ++ ++struct fs_qfilestat { ++ __u64 qfs_ino; ++ __u64 qfs_nblks; ++ __u32 qfs_nextents; ++}; ++ ++typedef struct fs_qfilestat fs_qfilestat_t; ++ ++struct fs_quota_stat { ++ __s8 qs_version; ++ __u16 qs_flags; ++ __s8 qs_pad; ++ fs_qfilestat_t qs_uquota; ++ fs_qfilestat_t qs_gquota; ++ __u32 qs_incoredqs; ++ __s32 qs_btimelimit; ++ __s32 qs_itimelimit; ++ __s32 qs_rtbtimelimit; ++ __u16 qs_bwarnlimit; ++ __u16 qs_iwarnlimit; ++}; ++ ++struct fs_qfilestatv { ++ __u64 qfs_ino; ++ __u64 qfs_nblks; ++ __u32 qfs_nextents; ++ __u32 qfs_pad; ++}; ++ ++struct fs_quota_statv { ++ __s8 qs_version; ++ __u8 qs_pad1; ++ __u16 qs_flags; ++ __u32 qs_incoredqs; ++ struct fs_qfilestatv qs_uquota; ++ struct fs_qfilestatv qs_gquota; ++ struct fs_qfilestatv qs_pquota; ++ __s32 qs_btimelimit; ++ __s32 qs_itimelimit; ++ __s32 qs_rtbtimelimit; ++ __u16 qs_bwarnlimit; ++ __u16 qs_iwarnlimit; ++ __u64 qs_pad2[8]; ++}; ++ ++struct if_dqblk { ++ __u64 dqb_bhardlimit; ++ __u64 dqb_bsoftlimit; ++ __u64 dqb_curspace; ++ __u64 dqb_ihardlimit; ++ __u64 dqb_isoftlimit; ++ __u64 dqb_curinodes; ++ __u64 dqb_btime; ++ __u64 dqb_itime; ++ __u32 dqb_valid; ++}; ++ ++struct if_nextdqblk { ++ __u64 dqb_bhardlimit; ++ __u64 dqb_bsoftlimit; ++ __u64 dqb_curspace; ++ __u64 dqb_ihardlimit; ++ __u64 dqb_isoftlimit; ++ __u64 dqb_curinodes; ++ __u64 dqb_btime; ++ __u64 dqb_itime; ++ __u32 dqb_valid; ++ __u32 dqb_id; ++}; ++ ++struct if_dqinfo { ++ __u64 dqi_bgrace; ++ __u64 dqi_igrace; ++ __u32 dqi_flags; ++ __u32 dqi_valid; ++}; ++ ++enum { 
++ QUOTA_NL_C_UNSPEC = 0, ++ QUOTA_NL_C_WARNING = 1, ++ __QUOTA_NL_C_MAX = 2, ++}; ++ ++enum { ++ QUOTA_NL_A_UNSPEC = 0, ++ QUOTA_NL_A_QTYPE = 1, ++ QUOTA_NL_A_EXCESS_ID = 2, ++ QUOTA_NL_A_WARNING = 3, ++ QUOTA_NL_A_DEV_MAJOR = 4, ++ QUOTA_NL_A_DEV_MINOR = 5, ++ QUOTA_NL_A_CAUSED_ID = 6, ++ QUOTA_NL_A_PAD = 7, ++ __QUOTA_NL_A_MAX = 8, ++}; ++ ++struct proc_maps_private { ++ struct inode___2 *inode; ++ struct task_struct *task; ++ struct mm_struct *mm; ++ struct vm_area_struct *tail_vma; ++ struct mempolicy *task_mempolicy; ++}; ++ ++struct mem_size_stats { ++ long unsigned int resident; ++ long unsigned int shared_clean; ++ long unsigned int shared_dirty; ++ long unsigned int private_clean; ++ long unsigned int private_dirty; ++ long unsigned int referenced; ++ long unsigned int anonymous; ++ long unsigned int lazyfree; ++ long unsigned int anonymous_thp; ++ long unsigned int shmem_thp; ++ long unsigned int swap; ++ long unsigned int shared_hugetlb; ++ long unsigned int private_hugetlb; ++ u64 pss; ++ u64 pss_locked; ++ u64 swap_pss; ++ bool check_shmem_swap; ++}; ++ ++enum clear_refs_types { ++ CLEAR_REFS_ALL = 1, ++ CLEAR_REFS_ANON = 2, ++ CLEAR_REFS_MAPPED = 3, ++ CLEAR_REFS_SOFT_DIRTY = 4, ++ CLEAR_REFS_MM_HIWATER_RSS = 5, ++ CLEAR_REFS_LAST = 6, ++}; ++ ++struct clear_refs_private { ++ enum clear_refs_types type; ++}; ++ ++typedef struct { ++ u64 pme; ++} pagemap_entry_t; ++ ++struct pagemapread { ++ int pos; ++ int len; ++ pagemap_entry_t *buffer; ++ bool show_pfn; ++}; ++ ++struct numa_maps { ++ long unsigned int pages; ++ long unsigned int anon; ++ long unsigned int active; ++ long unsigned int writeback; ++ long unsigned int mapcount_max; ++ long unsigned int dirty; ++ long unsigned int swapcache; ++ long unsigned int node[16]; ++}; ++ ++struct numa_maps_private { ++ struct proc_maps_private proc_maps; ++ struct numa_maps md; ++}; ++ ++enum { ++ HIDEPID_OFF = 0, ++ HIDEPID_NO_ACCESS = 1, ++ HIDEPID_INVISIBLE = 2, ++}; ++ ++struct pde_opener { ++ struct file___2 *file; ++ struct list_head lh; ++ bool closing; ++ struct completion *c; ++}; ++ ++enum { ++ BIAS = 2147483648, ++}; ++ ++enum { ++ Opt_gid = 0, ++ Opt_hidepid = 1, ++ Opt_err = 2, ++}; ++ ++struct reciprocal_value { ++ u32 m; ++ u8 sh1; ++ u8 sh2; ++}; ++ ++struct flex_array_part; ++ ++struct flex_array { ++ union { ++ struct { ++ int element_size; ++ int total_nr_elements; ++ int elems_per_part; ++ struct reciprocal_value reciprocal_elems; ++ struct flex_array_part *parts[0]; ++ }; ++ char padding[65536]; ++ }; ++}; ++ ++typedef struct dentry___2 *instantiate_t(struct dentry___2 *, struct task_struct *, const void *); ++ ++struct pid_entry { ++ const char *name; ++ unsigned int len; ++ umode_t mode; ++ const struct inode_operations___2 *iop; ++ const struct file_operations___2 *fop; ++ union proc_op op; ++}; ++ ++struct limit_names { ++ const char *name; ++ const char *unit; ++}; ++ ++struct map_files_info { ++ long unsigned int start; ++ long unsigned int end; ++ fmode_t mode; ++}; ++ ++struct timers_private { ++ struct pid *pid; ++ struct task_struct *task; ++ struct sighand_struct *sighand; ++ struct pid_namespace *ns; ++ long unsigned int flags; ++}; ++ ++struct tgid_iter { ++ unsigned int tgid; ++ struct task_struct *task; ++}; ++ ++typedef struct dentry___2 *instantiate_t___2(struct dentry___2 *, struct task_struct___2 *, const void *); ++ ++struct fd_data { ++ fmode_t mode; ++ unsigned int fd; ++}; ++ ++enum kcore_type { ++ KCORE_TEXT = 0, ++ KCORE_VMALLOC = 1, ++ KCORE_RAM = 2, ++ KCORE_VMEMMAP = 3, ++ KCORE_USER 
= 4, ++ KCORE_OTHER = 5, ++ KCORE_REMAP = 6, ++}; ++ ++struct kcore_list { ++ struct list_head list; ++ long unsigned int addr; ++ long unsigned int vaddr; ++ size_t size; ++ int type; ++}; ++ ++struct vmcore { ++ struct list_head list; ++ long long unsigned int paddr; ++ long long unsigned int size; ++ loff_t offset; ++}; ++ ++typedef struct elf64_note Elf64_Nhdr; ++ ++struct kernfs_iattrs { ++ struct iattr___2 ia_iattr; ++ void *ia_secdata; ++ u32 ia_secdata_len; ++ struct simple_xattrs xattrs; ++}; ++ ++struct kernfs_super_info { ++ struct super_block___2 *sb; ++ struct kernfs_root___2 *root; ++ const void *ns; ++ struct list_head node; ++}; ++ ++enum kernfs_node_flag { ++ KERNFS_ACTIVATED = 16, ++ KERNFS_NS = 32, ++ KERNFS_HAS_SEQ_SHOW = 64, ++ KERNFS_HAS_MMAP = 128, ++ KERNFS_LOCKDEP = 256, ++ KERNFS_SUICIDAL = 1024, ++ KERNFS_SUICIDED = 2048, ++ KERNFS_EMPTY_DIR = 4096, ++ KERNFS_HAS_RELEASE = 8192, ++}; ++ ++struct kernfs_open_node { ++ atomic_t refcnt; ++ atomic_t event; ++ wait_queue_head_t poll; ++ struct list_head files; ++}; ++ ++struct config_group; ++ ++struct config_item_type; ++ ++struct config_item { ++ char *ci_name; ++ char ci_namebuf[20]; ++ struct kref ci_kref; ++ struct list_head ci_entry; ++ struct config_item *ci_parent; ++ struct config_group *ci_group; ++ const struct config_item_type *ci_type; ++ struct dentry___2 *ci_dentry; ++}; ++ ++struct configfs_subsystem; ++ ++struct config_group { ++ struct config_item cg_item; ++ struct list_head cg_children; ++ struct configfs_subsystem *cg_subsys; ++ struct list_head default_groups; ++ struct list_head group_entry; ++}; ++ ++struct configfs_item_operations; ++ ++struct configfs_group_operations; ++ ++struct configfs_attribute; ++ ++struct configfs_bin_attribute; ++ ++struct config_item_type { ++ struct module *ct_owner; ++ struct configfs_item_operations *ct_item_ops; ++ struct configfs_group_operations *ct_group_ops; ++ struct configfs_attribute **ct_attrs; ++ struct configfs_bin_attribute **ct_bin_attrs; ++}; ++ ++struct configfs_item_operations { ++ void (*release)(struct config_item *); ++ int (*allow_link)(struct config_item *, struct config_item *); ++ void (*drop_link)(struct config_item *, struct config_item *); ++}; ++ ++struct configfs_group_operations { ++ struct config_item * (*make_item)(struct config_group *, const char *); ++ struct config_group * (*make_group)(struct config_group *, const char *); ++ int (*commit_item)(struct config_item *); ++ void (*disconnect_notify)(struct config_group *, struct config_item *); ++ void (*drop_item)(struct config_group *, struct config_item *); ++}; ++ ++struct configfs_attribute { ++ const char *ca_name; ++ struct module *ca_owner; ++ umode_t ca_mode; ++ ssize_t (*show)(struct config_item *, char *); ++ ssize_t (*store)(struct config_item *, const char *, size_t); ++}; ++ ++struct configfs_bin_attribute { ++ struct configfs_attribute cb_attr; ++ void *cb_private; ++ size_t cb_max_size; ++ ssize_t (*read)(struct config_item *, void *, size_t); ++ ssize_t (*write)(struct config_item *, const void *, size_t); ++}; ++ ++struct configfs_subsystem { ++ struct config_group su_group; ++ struct mutex su_mutex; ++}; ++ ++struct configfs_fragment { ++ atomic_t frag_count; ++ struct rw_semaphore frag_sem; ++ bool frag_dead; ++}; ++ ++struct configfs_dirent { ++ atomic_t s_count; ++ int s_dependent_count; ++ struct list_head s_sibling; ++ struct list_head s_children; ++ struct list_head s_links; ++ void *s_element; ++ int s_type; ++ umode_t s_mode; ++ struct dentry___2 
*s_dentry; ++ struct iattr___2 *s_iattr; ++ struct configfs_fragment *s_frag; ++}; ++ ++struct configfs_buffer { ++ size_t count; ++ loff_t pos; ++ char *page; ++ struct configfs_item_operations *ops; ++ struct mutex mutex; ++ int needs_read_fill; ++ bool read_in_progress; ++ bool write_in_progress; ++ char *bin_buffer; ++ int bin_buffer_size; ++ int cb_max_size; ++ struct config_item *item; ++ struct module___2 *owner; ++ union { ++ struct configfs_attribute *attr; ++ struct configfs_bin_attribute *bin_attr; ++ }; ++}; ++ ++struct configfs_symlink { ++ struct list_head sl_list; ++ struct config_item *sl_target; ++}; ++ ++struct pts_mount_opts { ++ int setuid; ++ int setgid; ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++ umode_t ptmxmode; ++ int reserve; ++ int max; ++}; ++ ++enum { ++ Opt_uid = 0, ++ Opt_gid___2 = 1, ++ Opt_mode = 2, ++ Opt_ptmxmode = 3, ++ Opt_newinstance = 4, ++ Opt_max = 5, ++ Opt_err___2 = 6, ++}; ++ ++struct pts_fs_info { ++ struct ida allocated_ptys; ++ struct pts_mount_opts mount_opts; ++ struct super_block___2 *sb; ++ struct dentry___2 *ptmx_dentry; ++}; ++ ++struct dcookie_struct { ++ struct path___2 path; ++ struct list_head hash_list; ++}; ++ ++struct dcookie_user { ++ struct list_head next; ++}; ++ ++struct ramfs_mount_opts { ++ umode_t mode; ++}; ++ ++struct ramfs_fs_info { ++ struct ramfs_mount_opts mount_opts; ++}; ++ ++enum { ++ Opt_mode___2 = 0, ++ Opt_err___3 = 1, ++}; ++ ++struct hugetlbfs_config { ++ struct hstate *hstate; ++ long int max_hpages; ++ long int nr_inodes; ++ long int min_hpages; ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++}; ++ ++enum { ++ Opt_size = 0, ++ Opt_nr_inodes = 1, ++ Opt_mode___3 = 2, ++ Opt_uid___2 = 3, ++ Opt_gid___3 = 4, ++ Opt_pagesize = 5, ++ Opt_min_size = 6, ++ Opt_err___4 = 7, ++}; ++ ++struct hf_args { ++ struct file___2 *file; ++ struct task_struct *parent_task; ++ struct mm_struct *mm; ++ struct shared_policy *shared_policy; ++ struct hstate *hstate; ++ struct address_space *mapping; ++ int error; ++}; ++ ++enum hugetlbfs_size_type { ++ NO_SIZE = 0, ++ SIZE_STD = 1, ++ SIZE_PERCENT = 2, ++}; ++ ++struct getdents_callback___2 { ++ struct dir_context ctx; ++ char *name; ++ u64 ino; ++ int found; ++ int sequence; ++}; ++ ++typedef u16 wchar_t; ++ ++typedef u32 unicode_t; ++ ++struct nls_table { ++ const char *charset; ++ const char *alias; ++ int (*uni2char)(wchar_t, unsigned char *, int); ++ int (*char2uni)(const unsigned char *, int, wchar_t *); ++ const unsigned char *charset2lower; ++ const unsigned char *charset2upper; ++ struct module___2 *owner; ++ struct nls_table *next; ++}; ++ ++enum utf16_endian { ++ UTF16_HOST_ENDIAN = 0, ++ UTF16_LITTLE_ENDIAN = 1, ++ UTF16_BIG_ENDIAN = 2, ++}; ++ ++struct utf8_table { ++ int cmask; ++ int cval; ++ int shift; ++ long int lmask; ++ long int lval; ++}; ++ ++typedef unsigned int autofs_wqt_t; ++ ++struct autofs_sb_info; ++ ++struct autofs_info { ++ struct dentry___2 *dentry; ++ struct inode___2 *inode; ++ int flags; ++ struct completion expire_complete; ++ struct list_head active; ++ int active_count; ++ struct list_head expiring; ++ struct autofs_sb_info *sbi; ++ long unsigned int last_used; ++ atomic_t count; ++ kuid_t uid; ++ kgid_t gid; ++ struct callback_head rcu; ++}; ++ ++struct autofs_wait_queue; ++ ++struct autofs_sb_info { ++ u32 magic; ++ int pipefd; ++ struct file___2 *pipe; ++ struct pid___2 *oz_pgrp; ++ int catatonic; ++ int version; ++ int sub_version; ++ int min_proto; ++ int max_proto; ++ long unsigned int exp_timeout; ++ unsigned int type; ++ struct 
super_block___2 *sb; ++ struct mutex wq_mutex; ++ struct mutex pipe_mutex; ++ spinlock_t fs_lock; ++ struct autofs_wait_queue *queues; ++ spinlock_t lookup_lock; ++ struct list_head active_list; ++ struct list_head expiring_list; ++ struct callback_head rcu; ++}; ++ ++struct autofs_wait_queue { ++ wait_queue_head_t queue; ++ struct autofs_wait_queue *next; ++ autofs_wqt_t wait_queue_token; ++ struct qstr name; ++ u32 dev; ++ u64 ino; ++ kuid_t uid; ++ kgid_t gid; ++ pid_t pid; ++ pid_t tgid; ++ int status; ++ unsigned int wait_ctr; ++}; ++ ++enum { ++ Opt_err___5 = 0, ++ Opt_fd = 1, ++ Opt_uid___3 = 2, ++ Opt_gid___4 = 3, ++ Opt_pgrp = 4, ++ Opt_minproto = 5, ++ Opt_maxproto = 6, ++ Opt_indirect = 7, ++ Opt_direct = 8, ++ Opt_offset = 9, ++}; ++ ++enum { ++ AUTOFS_IOC_READY_CMD = 96, ++ AUTOFS_IOC_FAIL_CMD = 97, ++ AUTOFS_IOC_CATATONIC_CMD = 98, ++ AUTOFS_IOC_PROTOVER_CMD = 99, ++ AUTOFS_IOC_SETTIMEOUT_CMD = 100, ++ AUTOFS_IOC_EXPIRE_CMD = 101, ++}; ++ ++enum autofs_notify { ++ NFY_NONE = 0, ++ NFY_MOUNT = 1, ++ NFY_EXPIRE = 2, ++}; ++ ++enum { ++ AUTOFS_IOC_EXPIRE_MULTI_CMD = 102, ++ AUTOFS_IOC_PROTOSUBVER_CMD = 103, ++ AUTOFS_IOC_ASKUMOUNT_CMD = 112, ++}; ++ ++struct autofs_packet_hdr { ++ int proto_version; ++ int type; ++}; ++ ++struct autofs_packet_missing { ++ struct autofs_packet_hdr hdr; ++ autofs_wqt_t wait_queue_token; ++ int len; ++ char name[256]; ++}; ++ ++struct autofs_packet_expire { ++ struct autofs_packet_hdr hdr; ++ int len; ++ char name[256]; ++}; ++ ++struct autofs_packet_expire_multi { ++ struct autofs_packet_hdr hdr; ++ autofs_wqt_t wait_queue_token; ++ int len; ++ char name[256]; ++}; ++ ++union autofs_packet_union { ++ struct autofs_packet_hdr hdr; ++ struct autofs_packet_missing missing; ++ struct autofs_packet_expire expire; ++ struct autofs_packet_expire_multi expire_multi; ++}; ++ ++struct autofs_v5_packet { ++ struct autofs_packet_hdr hdr; ++ autofs_wqt_t wait_queue_token; ++ __u32 dev; ++ __u64 ino; ++ __u32 uid; ++ __u32 gid; ++ __u32 pid; ++ __u32 tgid; ++ __u32 len; ++ char name[256]; ++}; ++ ++typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; ++ ++typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; ++ ++typedef struct autofs_v5_packet autofs_packet_missing_direct_t; ++ ++typedef struct autofs_v5_packet autofs_packet_expire_direct_t; ++ ++union autofs_v5_packet_union { ++ struct autofs_packet_hdr hdr; ++ struct autofs_v5_packet v5_packet; ++ autofs_packet_missing_indirect_t missing_indirect; ++ autofs_packet_expire_indirect_t expire_indirect; ++ autofs_packet_missing_direct_t missing_direct; ++ autofs_packet_expire_direct_t expire_direct; ++}; ++ ++struct args_protover { ++ __u32 version; ++}; ++ ++struct args_protosubver { ++ __u32 sub_version; ++}; ++ ++struct args_openmount { ++ __u32 devid; ++}; ++ ++struct args_ready { ++ __u32 token; ++}; ++ ++struct args_fail { ++ __u32 token; ++ __s32 status; ++}; ++ ++struct args_setpipefd { ++ __s32 pipefd; ++}; ++ ++struct args_timeout { ++ __u64 timeout; ++}; ++ ++struct args_requester { ++ __u32 uid; ++ __u32 gid; ++}; ++ ++struct args_expire { ++ __u32 how; ++}; ++ ++struct args_askumount { ++ __u32 may_umount; ++}; ++ ++struct args_in { ++ __u32 type; ++}; ++ ++struct args_out { ++ __u32 devid; ++ __u32 magic; ++}; ++ ++struct args_ismountpoint { ++ union { ++ struct args_in in; ++ struct args_out out; ++ }; ++}; ++ ++struct autofs_dev_ioctl { ++ __u32 ver_major; ++ __u32 ver_minor; ++ __u32 size; ++ __s32 ioctlfd; ++ union { ++ struct args_protover protover; ++ struct 
args_protosubver protosubver; ++ struct args_openmount openmount; ++ struct args_ready ready; ++ struct args_fail fail; ++ struct args_setpipefd setpipefd; ++ struct args_timeout timeout; ++ struct args_requester requester; ++ struct args_expire expire; ++ struct args_askumount askumount; ++ struct args_ismountpoint ismountpoint; ++ }; ++ char path[0]; ++}; ++ ++enum { ++ AUTOFS_DEV_IOCTL_VERSION_CMD = 113, ++ AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114, ++ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115, ++ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116, ++ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117, ++ AUTOFS_DEV_IOCTL_READY_CMD = 118, ++ AUTOFS_DEV_IOCTL_FAIL_CMD = 119, ++ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120, ++ AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121, ++ AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122, ++ AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123, ++ AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124, ++ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125, ++ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126, ++}; ++ ++typedef int (*ioctl_fn)(struct file___2 *, struct autofs_sb_info *, struct autofs_dev_ioctl *); ++ ++typedef struct vfsmount___2 * (*debugfs_automount_t)(struct dentry___2 *, void *); ++ ++struct debugfs_fsdata { ++ const struct file_operations___2 *real_fops; ++ refcount_t active_users; ++ struct completion active_users_drained; ++}; ++ ++struct debugfs_mount_opts { ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++}; ++ ++enum { ++ Opt_uid___4 = 0, ++ Opt_gid___5 = 1, ++ Opt_mode___4 = 2, ++ Opt_err___6 = 3, ++}; ++ ++struct debugfs_fs_info { ++ struct debugfs_mount_opts mount_opts; ++}; ++ ++struct debugfs_blob_wrapper { ++ void *data; ++ long unsigned int size; ++}; ++ ++struct debugfs_reg32 { ++ char *name; ++ long unsigned int offset; ++}; ++ ++struct debugfs_regset32 { ++ const struct debugfs_reg32 *regs; ++ int nregs; ++ void *base; ++}; ++ ++struct array_data { ++ void *array; ++ u32 elements; ++}; ++ ++struct debugfs_devm_entry { ++ int (*read)(struct seq_file___2 *, void *); ++ struct device___2 *dev; ++}; ++ ++struct tracefs_dir_ops { ++ int (*mkdir)(const char *); ++ int (*rmdir)(const char *); ++}; ++ ++struct tracefs_mount_opts { ++ kuid_t uid; ++ kgid_t gid; ++ umode_t mode; ++}; ++ ++struct tracefs_fs_info { ++ struct tracefs_mount_opts mount_opts; ++}; ++ ++enum pstore_type_id { ++ PSTORE_TYPE_DMESG = 0, ++ PSTORE_TYPE_MCE = 1, ++ PSTORE_TYPE_CONSOLE = 2, ++ PSTORE_TYPE_FTRACE = 3, ++ PSTORE_TYPE_PPC_RTAS = 4, ++ PSTORE_TYPE_PPC_OF = 5, ++ PSTORE_TYPE_PPC_COMMON = 6, ++ PSTORE_TYPE_PMSG = 7, ++ PSTORE_TYPE_PPC_OPAL = 8, ++ PSTORE_TYPE_UNKNOWN = 255, ++}; ++ ++struct pstore_info; ++ ++struct pstore_record { ++ struct pstore_info *psi; ++ enum pstore_type_id type; ++ u64 id; ++ struct timespec64 time; ++ char *buf; ++ ssize_t size; ++ ssize_t ecc_notice_size; ++ int count; ++ enum kmsg_dump_reason reason; ++ unsigned int part; ++ bool compressed; ++}; ++ ++struct pstore_info { ++ struct module___2 *owner; ++ char *name; ++ struct semaphore buf_lock; ++ char *buf; ++ size_t bufsize; ++ struct mutex read_mutex; ++ int flags; ++ void *data; ++ int (*open)(struct pstore_info *); ++ int (*close)(struct pstore_info *); ++ ssize_t (*read)(struct pstore_record *); ++ int (*write)(struct pstore_record *); ++ int (*write_user)(struct pstore_record *, const char *); ++ int (*erase)(struct pstore_record *); ++}; ++ ++struct pstore_ftrace_record { ++ long unsigned int ip; ++ long unsigned int parent_ip; ++ u64 ts; ++}; ++ ++struct pstore_private { ++ struct list_head list; ++ struct pstore_record *record; ++ size_t total_size; ++}; ++ ++struct 
pstore_ftrace_seq_data { ++ const void *ptr; ++ size_t off; ++ size_t size; ++}; ++ ++enum { ++ Opt_kmsg_bytes = 0, ++ Opt_err___7 = 1, ++}; ++ ++struct pstore_zbackend { ++ int (*zbufsize)(size_t); ++ const char *name; ++}; ++ ++struct efi_variable { ++ efi_char16_t VariableName[512]; ++ efi_guid_t VendorGuid; ++ long unsigned int DataSize; ++ __u8 Data[1024]; ++ efi_status_t Status; ++ __u32 Attributes; ++} __attribute__((packed)); ++ ++struct efivar_entry { ++ struct efi_variable var; ++ struct list_head list; ++ struct kobject kobj; ++ bool scanning; ++ bool deleting; ++}; ++ ++typedef unsigned int __kernel_mode_t; ++ ++struct ipc64_perm { ++ __kernel_key_t key; ++ __kernel_uid32_t uid; ++ __kernel_gid32_t gid; ++ __kernel_uid32_t cuid; ++ __kernel_gid32_t cgid; ++ __kernel_mode_t mode; ++ unsigned char __pad1[0]; ++ short unsigned int seq; ++ short unsigned int __pad2; ++ __kernel_ulong_t __unused1; ++ __kernel_ulong_t __unused2; ++}; ++ ++typedef u32 __compat_gid32_t; ++ ++typedef s32 compat_key_t; ++ ++struct compat_ipc64_perm { ++ compat_key_t key; ++ __compat_uid32_t uid; ++ __compat_gid32_t gid; ++ __compat_uid32_t cuid; ++ __compat_gid32_t cgid; ++ short unsigned int mode; ++ short unsigned int __pad1; ++ short unsigned int seq; ++ short unsigned int __pad2; ++ compat_ulong_t unused1; ++ compat_ulong_t unused2; ++}; ++ ++struct compat_ipc_perm { ++ key_t key; ++ __compat_uid_t uid; ++ __compat_gid_t gid; ++ __compat_uid_t cuid; ++ __compat_gid_t cgid; ++ compat_mode_t mode; ++ short unsigned int seq; ++}; ++ ++struct ipc_perm { ++ __kernel_key_t key; ++ __kernel_uid_t uid; ++ __kernel_gid_t gid; ++ __kernel_uid_t cuid; ++ __kernel_gid_t cgid; ++ __kernel_mode_t mode; ++ short unsigned int seq; ++}; ++ ++struct ipc_params { ++ key_t key; ++ int flg; ++ union { ++ size_t size; ++ int nsems; ++ } u; ++}; ++ ++struct ipc_ops { ++ int (*getnew)(struct ipc_namespace *, struct ipc_params *); ++ int (*associate)(struct kern_ipc_perm *, int); ++ int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); ++}; ++ ++struct ipc_proc_iface { ++ const char *path; ++ const char *header; ++ int ids; ++ int (*show)(struct seq_file___2 *, void *); ++}; ++ ++struct ipc_proc_iter { ++ struct ipc_namespace *ns; ++ struct pid_namespace___2 *pid_ns; ++ struct ipc_proc_iface *iface; ++}; ++ ++struct msg_msgseg; ++ ++struct msg_msg { ++ struct list_head m_list; ++ long int m_type; ++ size_t m_ts; ++ struct msg_msgseg *next; ++ void *security; ++}; ++ ++struct msg_msgseg { ++ struct msg_msgseg *next; ++}; ++ ++typedef int __kernel_ipc_pid_t; ++ ++struct msgbuf { ++ __kernel_long_t mtype; ++ char mtext[1]; ++}; ++ ++struct msg; ++ ++struct msqid_ds { ++ struct ipc_perm msg_perm; ++ struct msg *msg_first; ++ struct msg *msg_last; ++ __kernel_time_t msg_stime; ++ __kernel_time_t msg_rtime; ++ __kernel_time_t msg_ctime; ++ long unsigned int msg_lcbytes; ++ long unsigned int msg_lqbytes; ++ short unsigned int msg_cbytes; ++ short unsigned int msg_qnum; ++ short unsigned int msg_qbytes; ++ __kernel_ipc_pid_t msg_lspid; ++ __kernel_ipc_pid_t msg_lrpid; ++}; ++ ++struct msqid64_ds { ++ struct ipc64_perm msg_perm; ++ __kernel_time_t msg_stime; ++ __kernel_time_t msg_rtime; ++ __kernel_time_t msg_ctime; ++ long unsigned int msg_cbytes; ++ long unsigned int msg_qnum; ++ long unsigned int msg_qbytes; ++ __kernel_pid_t msg_lspid; ++ __kernel_pid_t msg_lrpid; ++ long unsigned int __unused4; ++ long unsigned int __unused5; ++}; ++ ++struct msginfo { ++ int msgpool; ++ int msgmap; ++ int msgmax; ++ int msgmnb; ++ 
int msgmni; ++ int msgssz; ++ int msgtql; ++ short unsigned int msgseg; ++}; ++ ++typedef u16 compat_ipc_pid_t; ++ ++struct compat_msqid64_ds { ++ struct compat_ipc64_perm msg_perm; ++ compat_ulong_t msg_stime; ++ compat_ulong_t msg_stime_high; ++ compat_ulong_t msg_rtime; ++ compat_ulong_t msg_rtime_high; ++ compat_ulong_t msg_ctime; ++ compat_ulong_t msg_ctime_high; ++ compat_ulong_t msg_cbytes; ++ compat_ulong_t msg_qnum; ++ compat_ulong_t msg_qbytes; ++ compat_pid_t msg_lspid; ++ compat_pid_t msg_lrpid; ++ compat_ulong_t __unused4; ++ compat_ulong_t __unused5; ++}; ++ ++struct msg_queue { ++ struct kern_ipc_perm q_perm; ++ time64_t q_stime; ++ time64_t q_rtime; ++ time64_t q_ctime; ++ long unsigned int q_cbytes; ++ long unsigned int q_qnum; ++ long unsigned int q_qbytes; ++ struct pid___2 *q_lspid; ++ struct pid___2 *q_lrpid; ++ struct list_head q_messages; ++ struct list_head q_receivers; ++ struct list_head q_senders; ++ long: 64; ++ long: 64; ++}; ++ ++struct msg_receiver { ++ struct list_head r_list; ++ struct task_struct___2 *r_tsk; ++ int r_mode; ++ long int r_msgtype; ++ long int r_maxsize; ++ struct msg_msg *r_msg; ++}; ++ ++struct msg_sender { ++ struct list_head list; ++ struct task_struct___2 *tsk; ++ size_t msgsz; ++}; ++ ++struct compat_msqid_ds { ++ struct compat_ipc_perm msg_perm; ++ compat_uptr_t msg_first; ++ compat_uptr_t msg_last; ++ compat_time_t msg_stime; ++ compat_time_t msg_rtime; ++ compat_time_t msg_ctime; ++ compat_ulong_t msg_lcbytes; ++ compat_ulong_t msg_lqbytes; ++ short unsigned int msg_cbytes; ++ short unsigned int msg_qnum; ++ short unsigned int msg_qbytes; ++ compat_ipc_pid_t msg_lspid; ++ compat_ipc_pid_t msg_lrpid; ++}; ++ ++struct compat_msgbuf { ++ compat_long_t mtype; ++ char mtext[1]; ++}; ++ ++struct sem; ++ ++struct sem_queue; ++ ++struct sem_undo; ++ ++struct semid_ds { ++ struct ipc_perm sem_perm; ++ __kernel_time_t sem_otime; ++ __kernel_time_t sem_ctime; ++ struct sem *sem_base; ++ struct sem_queue *sem_pending; ++ struct sem_queue **sem_pending_last; ++ struct sem_undo *undo; ++ short unsigned int sem_nsems; ++}; ++ ++struct sem { ++ int semval; ++ struct pid___2 *sempid; ++ spinlock_t lock; ++ struct list_head pending_alter; ++ struct list_head pending_const; ++ time64_t sem_otime; ++}; ++ ++struct sembuf; ++ ++struct sem_queue { ++ struct list_head list; ++ struct task_struct___2 *sleeper; ++ struct sem_undo *undo; ++ struct pid___2 *pid; ++ int status; ++ struct sembuf *sops; ++ struct sembuf *blocking; ++ int nsops; ++ bool alter; ++ bool dupsop; ++}; ++ ++struct sem_undo { ++ struct list_head list_proc; ++ struct callback_head rcu; ++ struct sem_undo_list *ulp; ++ struct list_head list_id; ++ int semid; ++ short int *semadj; ++}; ++ ++struct semid64_ds { ++ struct ipc64_perm sem_perm; ++ __kernel_time_t sem_otime; ++ __kernel_time_t sem_ctime; ++ long unsigned int sem_nsems; ++ long unsigned int __unused3; ++ long unsigned int __unused4; ++}; ++ ++struct sembuf { ++ short unsigned int sem_num; ++ short int sem_op; ++ short int sem_flg; ++}; ++ ++struct seminfo { ++ int semmap; ++ int semmni; ++ int semmns; ++ int semmnu; ++ int semmsl; ++ int semopm; ++ int semume; ++ int semusz; ++ int semvmx; ++ int semaem; ++}; ++ ++struct sem_undo_list { ++ refcount_t refcnt; ++ spinlock_t lock; ++ struct list_head list_proc; ++}; ++ ++struct compat_semid64_ds { ++ struct compat_ipc64_perm sem_perm; ++ compat_ulong_t sem_otime; ++ compat_ulong_t sem_otime_high; ++ compat_ulong_t sem_ctime; ++ compat_ulong_t sem_ctime_high; ++ compat_ulong_t 
sem_nsems; ++ compat_ulong_t __unused3; ++ compat_ulong_t __unused4; ++}; ++ ++struct sem_array { ++ struct kern_ipc_perm sem_perm; ++ time64_t sem_ctime; ++ struct list_head pending_alter; ++ struct list_head pending_const; ++ struct list_head list_id; ++ int sem_nsems; ++ int complex_count; ++ unsigned int use_global_lock; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct sem sems[0]; ++}; ++ ++struct compat_semid_ds { ++ struct compat_ipc_perm sem_perm; ++ compat_time_t sem_otime; ++ compat_time_t sem_ctime; ++ compat_uptr_t sem_base; ++ compat_uptr_t sem_pending; ++ compat_uptr_t sem_pending_last; ++ compat_uptr_t undo; ++ short unsigned int sem_nsems; ++}; ++ ++struct shmid_ds { ++ struct ipc_perm shm_perm; ++ int shm_segsz; ++ __kernel_time_t shm_atime; ++ __kernel_time_t shm_dtime; ++ __kernel_time_t shm_ctime; ++ __kernel_ipc_pid_t shm_cpid; ++ __kernel_ipc_pid_t shm_lpid; ++ short unsigned int shm_nattch; ++ short unsigned int shm_unused; ++ void *shm_unused2; ++ void *shm_unused3; ++}; ++ ++struct shmid64_ds { ++ struct ipc64_perm shm_perm; ++ size_t shm_segsz; ++ __kernel_time_t shm_atime; ++ __kernel_time_t shm_dtime; ++ __kernel_time_t shm_ctime; ++ __kernel_pid_t shm_cpid; ++ __kernel_pid_t shm_lpid; ++ long unsigned int shm_nattch; ++ long unsigned int __unused4; ++ long unsigned int __unused5; ++}; ++ ++struct shminfo64 { ++ long unsigned int shmmax; ++ long unsigned int shmmin; ++ long unsigned int shmmni; ++ long unsigned int shmseg; ++ long unsigned int shmall; ++ long unsigned int __unused1; ++ long unsigned int __unused2; ++ long unsigned int __unused3; ++ long unsigned int __unused4; ++}; ++ ++struct shminfo { ++ int shmmax; ++ int shmmin; ++ int shmmni; ++ int shmseg; ++ int shmall; ++}; ++ ++struct shm_info { ++ int used_ids; ++ __kernel_ulong_t shm_tot; ++ __kernel_ulong_t shm_rss; ++ __kernel_ulong_t shm_swp; ++ __kernel_ulong_t swap_attempts; ++ __kernel_ulong_t swap_successes; ++}; ++ ++struct compat_shmid64_ds { ++ struct compat_ipc64_perm shm_perm; ++ compat_size_t shm_segsz; ++ compat_ulong_t shm_atime; ++ compat_ulong_t shm_atime_high; ++ compat_ulong_t shm_dtime; ++ compat_ulong_t shm_dtime_high; ++ compat_ulong_t shm_ctime; ++ compat_ulong_t shm_ctime_high; ++ compat_pid_t shm_cpid; ++ compat_pid_t shm_lpid; ++ compat_ulong_t shm_nattch; ++ compat_ulong_t __unused4; ++ compat_ulong_t __unused5; ++}; ++ ++struct shmid_kernel { ++ struct kern_ipc_perm shm_perm; ++ struct file___2 *shm_file; ++ long unsigned int shm_nattch; ++ long unsigned int shm_segsz; ++ time64_t shm_atim; ++ time64_t shm_dtim; ++ time64_t shm_ctim; ++ struct pid *shm_cprid; ++ struct pid *shm_lprid; ++ struct user_struct *mlock_user; ++ struct task_struct *shm_creator; ++ struct list_head shm_clist; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct shm_file_data { ++ int id; ++ struct ipc_namespace *ns; ++ struct file___2 *file; ++ const struct vm_operations_struct *vm_ops; ++}; ++ ++struct compat_shmid_ds { ++ struct compat_ipc_perm shm_perm; ++ int shm_segsz; ++ compat_time_t shm_atime; ++ compat_time_t shm_dtime; ++ compat_time_t shm_ctime; ++ compat_ipc_pid_t shm_cpid; ++ compat_ipc_pid_t shm_lpid; ++ short unsigned int shm_nattch; ++ short unsigned int shm_unused; ++ compat_uptr_t shm_unused2; ++ compat_uptr_t shm_unused3; ++}; ++ ++struct compat_shminfo64 { ++ compat_ulong_t shmmax; ++ compat_ulong_t shmmin; ++ compat_ulong_t shmmni; ++ compat_ulong_t shmseg; ++ compat_ulong_t shmall; ++ 
compat_ulong_t __unused1; ++ compat_ulong_t __unused2; ++ compat_ulong_t __unused3; ++ compat_ulong_t __unused4; ++}; ++ ++struct compat_shm_info { ++ compat_int_t used_ids; ++ compat_ulong_t shm_tot; ++ compat_ulong_t shm_rss; ++ compat_ulong_t shm_swp; ++ compat_ulong_t swap_attempts; ++ compat_ulong_t swap_successes; ++}; ++ ++struct posix_msg_tree_node { ++ struct rb_node rb_node; ++ struct list_head msg_list; ++ int priority; ++}; ++ ++struct ext_wait_queue { ++ struct task_struct *task; ++ struct list_head list; ++ struct msg_msg *msg; ++ int state; ++}; ++ ++struct mqueue_inode_info { ++ spinlock_t lock; ++ struct inode vfs_inode; ++ wait_queue_head_t wait_q; ++ struct rb_root msg_tree; ++ struct posix_msg_tree_node *node_cache; ++ struct mq_attr attr; ++ struct sigevent notify; ++ struct pid *notify_owner; ++ u32 notify_self_exec_id; ++ struct user_namespace *notify_user_ns; ++ struct user_struct *user; ++ struct sock *notify_sock; ++ struct sk_buff *notify_cookie; ++ struct ext_wait_queue e_wait_q[2]; ++ long unsigned int qsize; ++}; ++ ++struct compat_mq_attr { ++ compat_long_t mq_flags; ++ compat_long_t mq_maxmsg; ++ compat_long_t mq_msgsize; ++ compat_long_t mq_curmsgs; ++ compat_long_t __reserved[4]; ++}; ++ ++enum key_state { ++ KEY_IS_UNINSTANTIATED = 0, ++ KEY_IS_POSITIVE = 1, ++}; ++ ++struct key_user { ++ struct rb_node node; ++ struct mutex cons_lock; ++ spinlock_t lock; ++ refcount_t usage; ++ atomic_t nkeys; ++ atomic_t nikeys; ++ kuid_t uid; ++ int qnkeys; ++ int qnbytes; ++}; ++ ++struct assoc_array_edit; ++ ++struct assoc_array_ops { ++ long unsigned int (*get_key_chunk)(const void *, int); ++ long unsigned int (*get_object_key_chunk)(const void *, int); ++ bool (*compare_object)(const void *, const void *); ++ int (*diff_objects)(const void *, const void *); ++ void (*free_object)(void *); ++}; ++ ++struct assoc_array_node { ++ struct assoc_array_ptr *back_pointer; ++ u8 parent_slot; ++ struct assoc_array_ptr *slots[16]; ++ long unsigned int nr_leaves_on_branch; ++}; ++ ++struct assoc_array_shortcut { ++ struct assoc_array_ptr *back_pointer; ++ int parent_slot; ++ int skip_to_level; ++ struct assoc_array_ptr *next_node; ++ long unsigned int index_key[0]; ++}; ++ ++struct assoc_array_edit___2 { ++ struct callback_head rcu; ++ struct assoc_array *array; ++ const struct assoc_array_ops *ops; ++ const struct assoc_array_ops *ops_for_excised_subtree; ++ struct assoc_array_ptr *leaf; ++ struct assoc_array_ptr **leaf_p; ++ struct assoc_array_ptr *dead_leaf; ++ struct assoc_array_ptr *new_meta[3]; ++ struct assoc_array_ptr *excised_meta[1]; ++ struct assoc_array_ptr *excised_subtree; ++ struct assoc_array_ptr **set_backpointers[16]; ++ struct assoc_array_ptr *set_backpointers_to; ++ struct assoc_array_node *adjust_count_on; ++ long int adjust_count_by; ++ struct { ++ struct assoc_array_ptr **ptr; ++ struct assoc_array_ptr *to; ++ } set[2]; ++ struct { ++ u8 *p; ++ u8 to; ++ } set_parent_slot[1]; ++ u8 segment_cache[17]; ++}; ++ ++struct keyring_search_context { ++ struct keyring_index_key index_key; ++ const struct cred___2 *cred; ++ struct key_match_data match_data; ++ unsigned int flags; ++ int (*iterator)(const void *, void *); ++ int skipped_ret; ++ bool possessed; ++ key_ref_t result; ++ time64_t now; ++}; ++ ++struct keyring_read_iterator_context { ++ size_t buflen; ++ size_t count; ++ key_serial_t *buffer; ++}; ++ ++struct keyctl_dh_params { ++ union { ++ __s32 private; ++ __s32 priv; ++ }; ++ __s32 prime; ++ __s32 base; ++}; ++ ++struct keyctl_kdf_params { ++ char 
*hashname; ++ char *otherinfo; ++ __u32 otherinfolen; ++ __u32 __spare[8]; ++}; ++ ++struct request_key_auth { ++ struct key___2 *target_key; ++ struct key___2 *dest_keyring; ++ const struct cred *cred; ++ void *callout_info; ++ size_t callout_len; ++ pid_t pid; ++ char op[8]; ++}; ++ ++struct user_key_payload { ++ struct callback_head rcu; ++ short unsigned int datalen; ++ long: 48; ++ char data[0]; ++}; ++ ++struct aead_request { ++ struct crypto_async_request base; ++ unsigned int assoclen; ++ unsigned int cryptlen; ++ u8 *iv; ++ struct scatterlist *src; ++ struct scatterlist *dst; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_aead; ++ ++struct aead_alg { ++ int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); ++ int (*setauthsize)(struct crypto_aead *, unsigned int); ++ int (*encrypt)(struct aead_request *); ++ int (*decrypt)(struct aead_request *); ++ int (*init)(struct crypto_aead *); ++ void (*exit)(struct crypto_aead *); ++ const char *geniv; ++ unsigned int ivsize; ++ unsigned int maxauthsize; ++ unsigned int chunksize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct crypto_aead { ++ unsigned int authsize; ++ unsigned int reqsize; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_tfm base; ++}; ++ ++struct big_key_buf { ++ unsigned int nr_pages; ++ void *virt; ++ struct scatterlist *sg; ++ struct page *pages[0]; ++}; ++ ++enum { ++ big_key_data = 0, ++ big_key_path = 1, ++ big_key_path_2nd_part = 2, ++ big_key_len = 3, ++}; ++ ++enum big_key_op { ++ BIG_KEY_ENC = 0, ++ BIG_KEY_DEC = 1, ++}; ++ ++struct vfs_cap_data { ++ __le32 magic_etc; ++ struct { ++ __le32 permitted; ++ __le32 inheritable; ++ } data[2]; ++}; ++ ++struct vfs_ns_cap_data { ++ __le32 magic_etc; ++ struct { ++ __le32 permitted; ++ __le32 inheritable; ++ } data[2]; ++ __le32 rootid; ++}; ++ ++struct security_mnt_opts { ++ char **mnt_opts; ++ int *mnt_opts_flags; ++ int num_mnt_opts; ++}; ++ ++struct sctp_endpoint; ++ ++struct xfrm_sec_ctx; ++ ++struct xfrm_user_sec_ctx; ++ ++union security_list_options { ++ int (*binder_set_context_mgr)(struct task_struct *); ++ int (*binder_transaction)(struct task_struct *, struct task_struct *); ++ int (*binder_transfer_binder)(struct task_struct *, struct task_struct *); ++ int (*binder_transfer_file)(struct task_struct *, struct task_struct *, struct file___2 *); ++ int (*ptrace_access_check)(struct task_struct *, unsigned int); ++ int (*ptrace_traceme)(struct task_struct *); ++ int (*capget)(struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *); ++ int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *); ++ int (*capable)(const struct cred *, struct user_namespace *, int, int); ++ int (*quotactl)(int, int, int, struct super_block___2 *); ++ int (*quota_on)(struct dentry___2 *); ++ int (*syslog)(int); ++ int (*settime)(const struct timespec64 *, const struct timezone *); ++ int (*vm_enough_memory)(struct mm_struct *, long int); ++ int (*bprm_set_creds)(struct linux_binprm *); ++ int (*bprm_check_security)(struct linux_binprm *); ++ void (*bprm_committing_creds)(struct linux_binprm *); ++ void (*bprm_committed_creds)(struct linux_binprm *); ++ int 
(*sb_alloc_security)(struct super_block___2 *); ++ void (*sb_free_security)(struct super_block___2 *); ++ int (*sb_copy_data)(char *, char *); ++ int (*sb_remount)(struct super_block___2 *, void *); ++ int (*sb_kern_mount)(struct super_block___2 *, int, void *); ++ int (*sb_show_options)(struct seq_file___2 *, struct super_block___2 *); ++ int (*sb_statfs)(struct dentry___2 *); ++ int (*sb_mount)(const char *, const struct path___2 *, const char *, long unsigned int, void *); ++ int (*sb_umount)(struct vfsmount___2 *, int); ++ int (*sb_pivotroot)(const struct path___2 *, const struct path___2 *); ++ int (*sb_set_mnt_opts)(struct super_block___2 *, struct security_mnt_opts *, long unsigned int, long unsigned int *); ++ int (*sb_clone_mnt_opts)(const struct super_block___2 *, struct super_block___2 *, long unsigned int, long unsigned int *); ++ int (*sb_parse_opts_str)(char *, struct security_mnt_opts *); ++ int (*dentry_init_security)(struct dentry___2 *, int, const struct qstr *, void **, u32 *); ++ int (*dentry_create_files_as)(struct dentry___2 *, int, struct qstr *, const struct cred *, struct cred *); ++ int (*path_unlink)(const struct path___2 *, struct dentry___2 *); ++ int (*path_mkdir)(const struct path___2 *, struct dentry___2 *, umode_t); ++ int (*path_rmdir)(const struct path___2 *, struct dentry___2 *); ++ int (*path_mknod)(const struct path___2 *, struct dentry___2 *, umode_t, unsigned int); ++ int (*path_truncate)(const struct path___2 *); ++ int (*path_symlink)(const struct path___2 *, struct dentry___2 *, const char *); ++ int (*path_link)(struct dentry___2 *, const struct path___2 *, struct dentry___2 *); ++ int (*path_rename)(const struct path___2 *, struct dentry___2 *, const struct path___2 *, struct dentry___2 *); ++ int (*path_chmod)(const struct path___2 *, umode_t); ++ int (*path_chown)(const struct path___2 *, kuid_t, kgid_t); ++ int (*path_chroot)(const struct path___2 *); ++ int (*inode_alloc_security)(struct inode___2 *); ++ void (*inode_free_security)(struct inode___2 *); ++ int (*inode_init_security)(struct inode___2 *, struct inode___2 *, const struct qstr *, const char **, void **, size_t *); ++ int (*inode_create)(struct inode___2 *, struct dentry___2 *, umode_t); ++ int (*inode_link)(struct dentry___2 *, struct inode___2 *, struct dentry___2 *); ++ int (*inode_unlink)(struct inode___2 *, struct dentry___2 *); ++ int (*inode_symlink)(struct inode___2 *, struct dentry___2 *, const char *); ++ int (*inode_mkdir)(struct inode___2 *, struct dentry___2 *, umode_t); ++ int (*inode_rmdir)(struct inode___2 *, struct dentry___2 *); ++ int (*inode_mknod)(struct inode___2 *, struct dentry___2 *, umode_t, dev_t); ++ int (*inode_rename)(struct inode___2 *, struct dentry___2 *, struct inode___2 *, struct dentry___2 *); ++ int (*inode_readlink)(struct dentry___2 *); ++ int (*inode_follow_link)(struct dentry___2 *, struct inode___2 *, bool); ++ int (*inode_permission)(struct inode___2 *, int); ++ int (*inode_setattr)(struct dentry___2 *, struct iattr___2 *); ++ int (*inode_getattr)(const struct path___2 *); ++ int (*inode_setxattr)(struct dentry___2 *, const char *, const void *, size_t, int); ++ void (*inode_post_setxattr)(struct dentry___2 *, const char *, const void *, size_t, int); ++ int (*inode_getxattr)(struct dentry___2 *, const char *); ++ int (*inode_listxattr)(struct dentry___2 *); ++ int (*inode_removexattr)(struct dentry___2 *, const char *); ++ int (*inode_need_killpriv)(struct dentry___2 *); ++ int (*inode_killpriv)(struct dentry___2 *); ++ int 
(*inode_getsecurity)(struct inode___2 *, const char *, void **, bool); ++ int (*inode_setsecurity)(struct inode___2 *, const char *, const void *, size_t, int); ++ int (*inode_listsecurity)(struct inode___2 *, char *, size_t); ++ void (*inode_getsecid)(struct inode___2 *, u32 *); ++ int (*inode_copy_up)(struct dentry___2 *, struct cred **); ++ int (*inode_copy_up_xattr)(const char *); ++ int (*file_permission)(struct file___2 *, int); ++ int (*file_alloc_security)(struct file___2 *); ++ void (*file_free_security)(struct file___2 *); ++ int (*file_ioctl)(struct file___2 *, unsigned int, long unsigned int); ++ int (*mmap_addr)(long unsigned int); ++ int (*mmap_file)(struct file___2 *, long unsigned int, long unsigned int, long unsigned int); ++ int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int); ++ int (*file_lock)(struct file___2 *, unsigned int); ++ int (*file_fcntl)(struct file___2 *, unsigned int, long unsigned int); ++ void (*file_set_fowner)(struct file___2 *); ++ int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int); ++ int (*file_receive)(struct file___2 *); ++ int (*file_open)(struct file___2 *); ++ int (*task_alloc)(struct task_struct *, long unsigned int); ++ void (*task_free)(struct task_struct *); ++ int (*cred_alloc_blank)(struct cred *, gfp_t); ++ void (*cred_free)(struct cred *); ++ int (*cred_prepare)(struct cred *, const struct cred *, gfp_t); ++ void (*cred_transfer)(struct cred *, const struct cred *); ++ void (*cred_getsecid)(const struct cred *, u32 *); ++ int (*kernel_act_as)(struct cred *, u32); ++ int (*kernel_create_files_as)(struct cred *, struct inode___2 *); ++ int (*kernel_module_request)(char *); ++ int (*kernel_load_data)(enum kernel_load_data_id); ++ int (*kernel_read_file)(struct file___2 *, enum kernel_read_file_id); ++ int (*kernel_post_read_file)(struct file___2 *, char *, loff_t, enum kernel_read_file_id); ++ int (*task_fix_setuid)(struct cred *, const struct cred *, int); ++ int (*task_setpgid)(struct task_struct *, pid_t); ++ int (*task_getpgid)(struct task_struct *); ++ int (*task_getsid)(struct task_struct *); ++ void (*task_getsecid)(struct task_struct *, u32 *); ++ int (*task_setnice)(struct task_struct *, int); ++ int (*task_setioprio)(struct task_struct *, int); ++ int (*task_getioprio)(struct task_struct *); ++ int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int); ++ int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *); ++ int (*task_setscheduler)(struct task_struct *); ++ int (*task_getscheduler)(struct task_struct *); ++ int (*task_movememory)(struct task_struct *); ++ int (*task_kill)(struct task_struct *, struct siginfo *, int, const struct cred *); ++ int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ void (*task_to_inode)(struct task_struct *, struct inode___2 *); ++ int (*ipc_permission)(struct kern_ipc_perm *, short int); ++ void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *); ++ int (*msg_msg_alloc_security)(struct msg_msg *); ++ void (*msg_msg_free_security)(struct msg_msg *); ++ int (*msg_queue_alloc_security)(struct kern_ipc_perm *); ++ void (*msg_queue_free_security)(struct kern_ipc_perm *); ++ int (*msg_queue_associate)(struct kern_ipc_perm *, int); ++ int (*msg_queue_msgctl)(struct kern_ipc_perm *, int); ++ int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int); ++ int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, 
int); ++ int (*shm_alloc_security)(struct kern_ipc_perm *); ++ void (*shm_free_security)(struct kern_ipc_perm *); ++ int (*shm_associate)(struct kern_ipc_perm *, int); ++ int (*shm_shmctl)(struct kern_ipc_perm *, int); ++ int (*shm_shmat)(struct kern_ipc_perm *, char *, int); ++ int (*sem_alloc_security)(struct kern_ipc_perm *); ++ void (*sem_free_security)(struct kern_ipc_perm *); ++ int (*sem_associate)(struct kern_ipc_perm *, int); ++ int (*sem_semctl)(struct kern_ipc_perm *, int); ++ int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int); ++ int (*netlink_send)(struct sock *, struct sk_buff___2 *); ++ void (*d_instantiate)(struct dentry___2 *, struct inode___2 *); ++ int (*getprocattr)(struct task_struct *, char *, char **); ++ int (*setprocattr)(const char *, void *, size_t); ++ int (*ismaclabel)(const char *); ++ int (*secid_to_secctx)(u32, char **, u32 *); ++ int (*secctx_to_secid)(const char *, u32, u32 *); ++ void (*release_secctx)(char *, u32); ++ void (*inode_invalidate_secctx)(struct inode___2 *); ++ int (*inode_notifysecctx)(struct inode___2 *, void *, u32); ++ int (*inode_setsecctx)(struct dentry___2 *, void *, u32); ++ int (*inode_getsecctx)(struct inode___2 *, void **, u32 *); ++ int (*unix_stream_connect)(struct sock *, struct sock *, struct sock *); ++ int (*unix_may_send)(struct socket *, struct socket *); ++ int (*socket_create)(int, int, int, int); ++ int (*socket_post_create)(struct socket *, int, int, int, int); ++ int (*socket_socketpair)(struct socket *, struct socket *); ++ int (*socket_bind)(struct socket *, struct sockaddr *, int); ++ int (*socket_connect)(struct socket *, struct sockaddr *, int); ++ int (*socket_listen)(struct socket *, int); ++ int (*socket_accept)(struct socket *, struct socket *); ++ int (*socket_sendmsg)(struct socket *, struct msghdr *, int); ++ int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int); ++ int (*socket_getsockname)(struct socket *); ++ int (*socket_getpeername)(struct socket *); ++ int (*socket_getsockopt)(struct socket *, int, int); ++ int (*socket_setsockopt)(struct socket *, int, int); ++ int (*socket_shutdown)(struct socket *, int); ++ int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff___2 *); ++ int (*socket_getpeersec_stream)(struct socket *, char *, int *, unsigned int); ++ int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff___2 *, u32 *); ++ int (*sk_alloc_security)(struct sock *, int, gfp_t); ++ void (*sk_free_security)(struct sock *); ++ void (*sk_clone_security)(const struct sock *, struct sock *); ++ void (*sk_getsecid)(struct sock *, u32 *); ++ void (*sock_graft)(struct sock *, struct socket *); ++ int (*inet_conn_request)(struct sock *, struct sk_buff___2 *, struct request_sock *); ++ void (*inet_csk_clone)(struct sock *, const struct request_sock *); ++ void (*inet_conn_established)(struct sock *, struct sk_buff___2 *); ++ int (*secmark_relabel_packet)(u32); ++ void (*secmark_refcount_inc)(); ++ void (*secmark_refcount_dec)(); ++ void (*req_classify_flow)(const struct request_sock *, struct flowi *); ++ int (*tun_dev_alloc_security)(void **); ++ void (*tun_dev_free_security)(void *); ++ int (*tun_dev_create)(); ++ int (*tun_dev_attach_queue)(void *); ++ int (*tun_dev_attach)(struct sock *, void *); ++ int (*tun_dev_open)(void *); ++ int (*sctp_assoc_request)(struct sctp_endpoint *, struct sk_buff___2 *); ++ int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int); ++ void (*sctp_sk_clone)(struct sctp_endpoint *, struct sock *, struct sock *); ++ int 
(*ib_pkey_access)(void *, u64, u16); ++ int (*ib_endport_manage_subnet)(void *, const char *, u8); ++ int (*ib_alloc_security)(void **); ++ void (*ib_free_security)(void *); ++ int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **, struct xfrm_user_sec_ctx *, gfp_t); ++ int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *, struct xfrm_sec_ctx **); ++ void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *); ++ int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *); ++ int (*xfrm_state_alloc)(struct xfrm_state *, struct xfrm_user_sec_ctx *); ++ int (*xfrm_state_alloc_acquire)(struct xfrm_state *, struct xfrm_sec_ctx *, u32); ++ void (*xfrm_state_free_security)(struct xfrm_state *); ++ int (*xfrm_state_delete_security)(struct xfrm_state *); ++ int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *, u32, u8); ++ int (*xfrm_state_pol_flow_match)(struct xfrm_state *, struct xfrm_policy *, const struct flowi *); ++ int (*xfrm_decode_session)(struct sk_buff___2 *, u32 *, int); ++ int (*key_alloc)(struct key *, const struct cred *, long unsigned int); ++ void (*key_free)(struct key *); ++ int (*key_permission)(key_ref_t, const struct cred *, unsigned int); ++ int (*key_getsecurity)(struct key *, char **); ++ int (*audit_rule_init)(u32, u32, char *, void **); ++ int (*audit_rule_known)(struct audit_krule *); ++ int (*audit_rule_match)(u32, u32, u32, void *, struct audit_context *); ++ void (*audit_rule_free)(void *); ++ int (*bpf)(int, union bpf_attr *, unsigned int); ++ int (*bpf_map)(struct bpf_map *, fmode_t); ++ int (*bpf_prog)(struct bpf_prog *); ++ int (*bpf_map_alloc_security)(struct bpf_map *); ++ void (*bpf_map_free_security)(struct bpf_map *); ++ int (*bpf_prog_alloc_security)(struct bpf_prog_aux *); ++ void (*bpf_prog_free_security)(struct bpf_prog_aux *); ++}; ++ ++struct security_hook_heads { ++ struct hlist_head binder_set_context_mgr; ++ struct hlist_head binder_transaction; ++ struct hlist_head binder_transfer_binder; ++ struct hlist_head binder_transfer_file; ++ struct hlist_head ptrace_access_check; ++ struct hlist_head ptrace_traceme; ++ struct hlist_head capget; ++ struct hlist_head capset; ++ struct hlist_head capable; ++ struct hlist_head quotactl; ++ struct hlist_head quota_on; ++ struct hlist_head syslog; ++ struct hlist_head settime; ++ struct hlist_head vm_enough_memory; ++ struct hlist_head bprm_set_creds; ++ struct hlist_head bprm_check_security; ++ struct hlist_head bprm_committing_creds; ++ struct hlist_head bprm_committed_creds; ++ struct hlist_head sb_alloc_security; ++ struct hlist_head sb_free_security; ++ struct hlist_head sb_copy_data; ++ struct hlist_head sb_remount; ++ struct hlist_head sb_kern_mount; ++ struct hlist_head sb_show_options; ++ struct hlist_head sb_statfs; ++ struct hlist_head sb_mount; ++ struct hlist_head sb_umount; ++ struct hlist_head sb_pivotroot; ++ struct hlist_head sb_set_mnt_opts; ++ struct hlist_head sb_clone_mnt_opts; ++ struct hlist_head sb_parse_opts_str; ++ struct hlist_head dentry_init_security; ++ struct hlist_head dentry_create_files_as; ++ struct hlist_head path_unlink; ++ struct hlist_head path_mkdir; ++ struct hlist_head path_rmdir; ++ struct hlist_head path_mknod; ++ struct hlist_head path_truncate; ++ struct hlist_head path_symlink; ++ struct hlist_head path_link; ++ struct hlist_head path_rename; ++ struct hlist_head path_chmod; ++ struct hlist_head path_chown; ++ struct hlist_head path_chroot; ++ struct hlist_head inode_alloc_security; ++ struct hlist_head inode_free_security; ++ struct hlist_head inode_init_security; ++ 
struct hlist_head inode_create; ++ struct hlist_head inode_link; ++ struct hlist_head inode_unlink; ++ struct hlist_head inode_symlink; ++ struct hlist_head inode_mkdir; ++ struct hlist_head inode_rmdir; ++ struct hlist_head inode_mknod; ++ struct hlist_head inode_rename; ++ struct hlist_head inode_readlink; ++ struct hlist_head inode_follow_link; ++ struct hlist_head inode_permission; ++ struct hlist_head inode_setattr; ++ struct hlist_head inode_getattr; ++ struct hlist_head inode_setxattr; ++ struct hlist_head inode_post_setxattr; ++ struct hlist_head inode_getxattr; ++ struct hlist_head inode_listxattr; ++ struct hlist_head inode_removexattr; ++ struct hlist_head inode_need_killpriv; ++ struct hlist_head inode_killpriv; ++ struct hlist_head inode_getsecurity; ++ struct hlist_head inode_setsecurity; ++ struct hlist_head inode_listsecurity; ++ struct hlist_head inode_getsecid; ++ struct hlist_head inode_copy_up; ++ struct hlist_head inode_copy_up_xattr; ++ struct hlist_head file_permission; ++ struct hlist_head file_alloc_security; ++ struct hlist_head file_free_security; ++ struct hlist_head file_ioctl; ++ struct hlist_head mmap_addr; ++ struct hlist_head mmap_file; ++ struct hlist_head file_mprotect; ++ struct hlist_head file_lock; ++ struct hlist_head file_fcntl; ++ struct hlist_head file_set_fowner; ++ struct hlist_head file_send_sigiotask; ++ struct hlist_head file_receive; ++ struct hlist_head file_open; ++ struct hlist_head task_alloc; ++ struct hlist_head task_free; ++ struct hlist_head cred_alloc_blank; ++ struct hlist_head cred_free; ++ struct hlist_head cred_prepare; ++ struct hlist_head cred_transfer; ++ struct hlist_head cred_getsecid; ++ struct hlist_head kernel_act_as; ++ struct hlist_head kernel_create_files_as; ++ struct hlist_head kernel_load_data; ++ struct hlist_head kernel_read_file; ++ struct hlist_head kernel_post_read_file; ++ struct hlist_head kernel_module_request; ++ struct hlist_head task_fix_setuid; ++ struct hlist_head task_setpgid; ++ struct hlist_head task_getpgid; ++ struct hlist_head task_getsid; ++ struct hlist_head task_getsecid; ++ struct hlist_head task_setnice; ++ struct hlist_head task_setioprio; ++ struct hlist_head task_getioprio; ++ struct hlist_head task_prlimit; ++ struct hlist_head task_setrlimit; ++ struct hlist_head task_setscheduler; ++ struct hlist_head task_getscheduler; ++ struct hlist_head task_movememory; ++ struct hlist_head task_kill; ++ struct hlist_head task_prctl; ++ struct hlist_head task_to_inode; ++ struct hlist_head ipc_permission; ++ struct hlist_head ipc_getsecid; ++ struct hlist_head msg_msg_alloc_security; ++ struct hlist_head msg_msg_free_security; ++ struct hlist_head msg_queue_alloc_security; ++ struct hlist_head msg_queue_free_security; ++ struct hlist_head msg_queue_associate; ++ struct hlist_head msg_queue_msgctl; ++ struct hlist_head msg_queue_msgsnd; ++ struct hlist_head msg_queue_msgrcv; ++ struct hlist_head shm_alloc_security; ++ struct hlist_head shm_free_security; ++ struct hlist_head shm_associate; ++ struct hlist_head shm_shmctl; ++ struct hlist_head shm_shmat; ++ struct hlist_head sem_alloc_security; ++ struct hlist_head sem_free_security; ++ struct hlist_head sem_associate; ++ struct hlist_head sem_semctl; ++ struct hlist_head sem_semop; ++ struct hlist_head netlink_send; ++ struct hlist_head d_instantiate; ++ struct hlist_head getprocattr; ++ struct hlist_head setprocattr; ++ struct hlist_head ismaclabel; ++ struct hlist_head secid_to_secctx; ++ struct hlist_head secctx_to_secid; ++ struct hlist_head 
release_secctx; ++ struct hlist_head inode_invalidate_secctx; ++ struct hlist_head inode_notifysecctx; ++ struct hlist_head inode_setsecctx; ++ struct hlist_head inode_getsecctx; ++ struct hlist_head unix_stream_connect; ++ struct hlist_head unix_may_send; ++ struct hlist_head socket_create; ++ struct hlist_head socket_post_create; ++ struct hlist_head socket_socketpair; ++ struct hlist_head socket_bind; ++ struct hlist_head socket_connect; ++ struct hlist_head socket_listen; ++ struct hlist_head socket_accept; ++ struct hlist_head socket_sendmsg; ++ struct hlist_head socket_recvmsg; ++ struct hlist_head socket_getsockname; ++ struct hlist_head socket_getpeername; ++ struct hlist_head socket_getsockopt; ++ struct hlist_head socket_setsockopt; ++ struct hlist_head socket_shutdown; ++ struct hlist_head socket_sock_rcv_skb; ++ struct hlist_head socket_getpeersec_stream; ++ struct hlist_head socket_getpeersec_dgram; ++ struct hlist_head sk_alloc_security; ++ struct hlist_head sk_free_security; ++ struct hlist_head sk_clone_security; ++ struct hlist_head sk_getsecid; ++ struct hlist_head sock_graft; ++ struct hlist_head inet_conn_request; ++ struct hlist_head inet_csk_clone; ++ struct hlist_head inet_conn_established; ++ struct hlist_head secmark_relabel_packet; ++ struct hlist_head secmark_refcount_inc; ++ struct hlist_head secmark_refcount_dec; ++ struct hlist_head req_classify_flow; ++ struct hlist_head tun_dev_alloc_security; ++ struct hlist_head tun_dev_free_security; ++ struct hlist_head tun_dev_create; ++ struct hlist_head tun_dev_attach_queue; ++ struct hlist_head tun_dev_attach; ++ struct hlist_head tun_dev_open; ++ struct hlist_head sctp_assoc_request; ++ struct hlist_head sctp_bind_connect; ++ struct hlist_head sctp_sk_clone; ++ struct hlist_head ib_pkey_access; ++ struct hlist_head ib_endport_manage_subnet; ++ struct hlist_head ib_alloc_security; ++ struct hlist_head ib_free_security; ++ struct hlist_head xfrm_policy_alloc_security; ++ struct hlist_head xfrm_policy_clone_security; ++ struct hlist_head xfrm_policy_free_security; ++ struct hlist_head xfrm_policy_delete_security; ++ struct hlist_head xfrm_state_alloc; ++ struct hlist_head xfrm_state_alloc_acquire; ++ struct hlist_head xfrm_state_free_security; ++ struct hlist_head xfrm_state_delete_security; ++ struct hlist_head xfrm_policy_lookup; ++ struct hlist_head xfrm_state_pol_flow_match; ++ struct hlist_head xfrm_decode_session; ++ struct hlist_head key_alloc; ++ struct hlist_head key_free; ++ struct hlist_head key_permission; ++ struct hlist_head key_getsecurity; ++ struct hlist_head audit_rule_init; ++ struct hlist_head audit_rule_known; ++ struct hlist_head audit_rule_match; ++ struct hlist_head audit_rule_free; ++ struct hlist_head bpf; ++ struct hlist_head bpf_map; ++ struct hlist_head bpf_prog; ++ struct hlist_head bpf_map_alloc_security; ++ struct hlist_head bpf_map_free_security; ++ struct hlist_head bpf_prog_alloc_security; ++ struct hlist_head bpf_prog_free_security; ++}; ++ ++struct security_hook_list { ++ struct hlist_node list; ++ struct hlist_head *head; ++ union security_list_options hook; ++ char *lsm; ++}; ++ ++typedef int (*sk_read_actor_t___2)(read_descriptor_t *, struct sk_buff___2 *, unsigned int, size_t); ++ ++enum lsm_event { ++ LSM_POLICY_CHANGE = 0, ++}; ++ ++typedef int (*initxattrs)(struct inode___2 *, const struct xattr *, void *); ++ ++enum { ++ IB_USER_VERBS_CMD_GET_CONTEXT = 0, ++ IB_USER_VERBS_CMD_QUERY_DEVICE = 1, ++ IB_USER_VERBS_CMD_QUERY_PORT = 2, ++ IB_USER_VERBS_CMD_ALLOC_PD = 3, ++ 
IB_USER_VERBS_CMD_DEALLOC_PD = 4, ++ IB_USER_VERBS_CMD_CREATE_AH = 5, ++ IB_USER_VERBS_CMD_MODIFY_AH = 6, ++ IB_USER_VERBS_CMD_QUERY_AH = 7, ++ IB_USER_VERBS_CMD_DESTROY_AH = 8, ++ IB_USER_VERBS_CMD_REG_MR = 9, ++ IB_USER_VERBS_CMD_REG_SMR = 10, ++ IB_USER_VERBS_CMD_REREG_MR = 11, ++ IB_USER_VERBS_CMD_QUERY_MR = 12, ++ IB_USER_VERBS_CMD_DEREG_MR = 13, ++ IB_USER_VERBS_CMD_ALLOC_MW = 14, ++ IB_USER_VERBS_CMD_BIND_MW = 15, ++ IB_USER_VERBS_CMD_DEALLOC_MW = 16, ++ IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, ++ IB_USER_VERBS_CMD_CREATE_CQ = 18, ++ IB_USER_VERBS_CMD_RESIZE_CQ = 19, ++ IB_USER_VERBS_CMD_DESTROY_CQ = 20, ++ IB_USER_VERBS_CMD_POLL_CQ = 21, ++ IB_USER_VERBS_CMD_PEEK_CQ = 22, ++ IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, ++ IB_USER_VERBS_CMD_CREATE_QP = 24, ++ IB_USER_VERBS_CMD_QUERY_QP = 25, ++ IB_USER_VERBS_CMD_MODIFY_QP = 26, ++ IB_USER_VERBS_CMD_DESTROY_QP = 27, ++ IB_USER_VERBS_CMD_POST_SEND = 28, ++ IB_USER_VERBS_CMD_POST_RECV = 29, ++ IB_USER_VERBS_CMD_ATTACH_MCAST = 30, ++ IB_USER_VERBS_CMD_DETACH_MCAST = 31, ++ IB_USER_VERBS_CMD_CREATE_SRQ = 32, ++ IB_USER_VERBS_CMD_MODIFY_SRQ = 33, ++ IB_USER_VERBS_CMD_QUERY_SRQ = 34, ++ IB_USER_VERBS_CMD_DESTROY_SRQ = 35, ++ IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, ++ IB_USER_VERBS_CMD_OPEN_XRCD = 37, ++ IB_USER_VERBS_CMD_CLOSE_XRCD = 38, ++ IB_USER_VERBS_CMD_CREATE_XSRQ = 39, ++ IB_USER_VERBS_CMD_OPEN_QP = 40, ++}; ++ ++enum ib_uverbs_create_qp_mask { ++ IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, ++}; ++ ++enum ib_uverbs_wr_opcode { ++ IB_UVERBS_WR_RDMA_WRITE = 0, ++ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, ++ IB_UVERBS_WR_SEND = 2, ++ IB_UVERBS_WR_SEND_WITH_IMM = 3, ++ IB_UVERBS_WR_RDMA_READ = 4, ++ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, ++ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, ++ IB_UVERBS_WR_LOCAL_INV = 7, ++ IB_UVERBS_WR_BIND_MW = 8, ++ IB_UVERBS_WR_SEND_WITH_INV = 9, ++ IB_UVERBS_WR_TSO = 10, ++ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, ++ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, ++ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, ++}; ++ ++enum ib_uverbs_access_flags { ++ IB_UVERBS_ACCESS_LOCAL_WRITE = 1, ++ IB_UVERBS_ACCESS_REMOTE_WRITE = 2, ++ IB_UVERBS_ACCESS_REMOTE_READ = 4, ++ IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, ++ IB_UVERBS_ACCESS_MW_BIND = 16, ++ IB_UVERBS_ACCESS_ZERO_BASED = 32, ++ IB_UVERBS_ACCESS_ON_DEMAND = 64, ++ IB_UVERBS_ACCESS_HUGETLB = 128, ++}; ++ ++union ib_gid { ++ u8 raw[16]; ++ struct { ++ __be64 subnet_prefix; ++ __be64 interface_id; ++ } global; ++}; ++ ++struct lsm_network_audit { ++ int netif; ++ struct sock *sk; ++ u16 family; ++ __be16 dport; ++ __be16 sport; ++ union { ++ struct { ++ __be32 daddr; ++ __be32 saddr; ++ } v4; ++ struct { ++ struct in6_addr daddr; ++ struct in6_addr saddr; ++ } v6; ++ } fam; ++}; ++ ++struct lsm_ioctlop_audit { ++ struct path path; ++ u16 cmd; ++}; ++ ++struct lsm_ibpkey_audit { ++ u64 subnet_prefix; ++ u16 pkey; ++}; ++ ++struct lsm_ibendport_audit { ++ char dev_name[64]; ++ u8 port; ++}; ++ ++struct selinux_state; ++ ++struct selinux_audit_data { ++ u32 ssid; ++ u32 tsid; ++ u16 tclass; ++ u32 requested; ++ u32 audited; ++ u32 denied; ++ int result; ++ struct selinux_state *state; ++}; ++ ++struct common_audit_data { ++ char type; ++ union { ++ struct path path; ++ struct dentry *dentry; ++ struct inode *inode; ++ struct lsm_network_audit *net; ++ int cap; ++ int ipc_id; ++ struct task_struct *tsk; ++ struct { ++ key_serial_t key; ++ char *key_desc; ++ } key_struct; ++ char *kmod_name; ++ struct lsm_ioctlop_audit *op; ++ struct file *file; ++ struct lsm_ibpkey_audit *ibpkey; ++ struct 
lsm_ibendport_audit *ibendport; ++ } u; ++ union { ++ struct selinux_audit_data *selinux_audit_data; ++ }; ++}; ++ ++enum { ++ POLICYDB_CAPABILITY_NETPEER = 0, ++ POLICYDB_CAPABILITY_OPENPERM = 1, ++ POLICYDB_CAPABILITY_EXTSOCKCLASS = 2, ++ POLICYDB_CAPABILITY_ALWAYSNETWORK = 3, ++ POLICYDB_CAPABILITY_CGROUPSECLABEL = 4, ++ POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION = 5, ++ __POLICYDB_CAPABILITY_MAX = 6, ++}; ++ ++struct selinux_avc; ++ ++struct selinux_ss; ++ ++struct selinux_state { ++ bool disabled; ++ bool enforcing; ++ bool checkreqprot; ++ bool initialized; ++ bool policycap[6]; ++ struct selinux_avc *avc; ++ struct selinux_ss *ss; ++}; ++ ++struct avc_cache { ++ struct hlist_head slots[512]; ++ spinlock_t slots_lock[512]; ++ atomic_t lru_hint; ++ atomic_t active_nodes; ++ u32 latest_notif; ++}; ++ ++struct selinux_avc { ++ unsigned int avc_cache_threshold; ++ struct avc_cache avc_cache; ++}; ++ ++struct av_decision { ++ u32 allowed; ++ u32 auditallow; ++ u32 auditdeny; ++ u32 seqno; ++ u32 flags; ++}; ++ ++struct extended_perms_data { ++ u32 p[8]; ++}; ++ ++struct extended_perms_decision { ++ u8 used; ++ u8 driver; ++ struct extended_perms_data *allowed; ++ struct extended_perms_data *auditallow; ++ struct extended_perms_data *dontaudit; ++}; ++ ++struct extended_perms { ++ u16 len; ++ struct extended_perms_data drivers; ++}; ++ ++struct avc_cache_stats { ++ unsigned int lookups; ++ unsigned int misses; ++ unsigned int allocations; ++ unsigned int reclaims; ++ unsigned int frees; ++}; ++ ++struct security_class_mapping { ++ const char *name; ++ const char *perms[33]; ++}; ++ ++struct avc_xperms_node; ++ ++struct avc_entry { ++ u32 ssid; ++ u32 tsid; ++ u16 tclass; ++ struct av_decision avd; ++ struct avc_xperms_node *xp_node; ++}; ++ ++struct avc_xperms_node { ++ struct extended_perms xp; ++ struct list_head xpd_head; ++}; ++ ++struct avc_node { ++ struct avc_entry ae; ++ struct hlist_node list; ++ struct callback_head rhead; ++}; ++ ++struct avc_xperms_decision_node { ++ struct extended_perms_decision xpd; ++ struct list_head xpd_list; ++}; ++ ++struct avc_callback_node { ++ int (*callback)(u32); ++ u32 events; ++ struct avc_callback_node *next; ++}; ++ ++enum sctp_endpoint_type { ++ SCTP_EP_TYPE_SOCKET = 0, ++ SCTP_EP_TYPE_ASSOCIATION = 1, ++}; ++ ++struct sctp_chunk; ++ ++struct sctp_inq { ++ struct list_head in_chunk_list; ++ struct sctp_chunk *in_progress; ++ struct work_struct immediate; ++}; ++ ++struct sctp_bind_addr { ++ __u16 port; ++ struct list_head address_list; ++}; ++ ++struct sctp_ep_common { ++ struct hlist_node node; ++ int hashent; ++ enum sctp_endpoint_type type; ++ refcount_t refcnt; ++ bool dead; ++ struct sock *sk; ++ struct net *net; ++ struct sctp_inq inqueue; ++ struct sctp_bind_addr bind_addr; ++}; ++ ++struct crypto_shash; ++ ++struct sctp_hmac_algo_param; ++ ++struct sctp_chunks_param; ++ ++struct sctp_endpoint { ++ struct sctp_ep_common base; ++ struct list_head asocs; ++ __u8 secret_key[32]; ++ __u8 *digest; ++ __u32 sndbuf_policy; ++ __u32 rcvbuf_policy; ++ struct crypto_shash **auth_hmacs; ++ struct sctp_hmac_algo_param *auth_hmacs_list; ++ struct sctp_chunks_param *auth_chunk_list; ++ struct list_head endpoint_shared_keys; ++ __u16 active_key_id; ++ __u8 auth_enable: 1; ++ __u8 prsctp_enable: 1; ++ __u8 reconf_enable: 1; ++ __u8 strreset_enable; ++ u32 secid; ++ u32 peer_secid; ++}; ++ ++struct xfrm_sec_ctx { ++ __u8 ctx_doi; ++ __u8 ctx_alg; ++ __u16 ctx_len; ++ __u32 ctx_sid; ++ char ctx_str[0]; ++}; ++ ++struct xfrm_user_sec_ctx { ++ __u16 len; ++ 
__u16 exttype; ++ __u8 ctx_alg; ++ __u8 ctx_doi; ++ __u16 ctx_len; ++}; ++ ++struct tty_file_private { ++ struct tty_struct *tty; ++ struct file *file; ++ struct list_head list; ++}; ++ ++struct socket_alloc { ++ struct socket socket; ++ struct inode vfs_inode; ++}; ++ ++struct netlbl_lsm_cache { ++ refcount_t refcount; ++ void (*free)(const void *); ++ void *data; ++}; ++ ++struct netlbl_lsm_catmap { ++ u32 startbit; ++ u64 bitmap[4]; ++ struct netlbl_lsm_catmap *next; ++}; ++ ++struct netlbl_lsm_secattr { ++ u32 flags; ++ u32 type; ++ char *domain; ++ struct netlbl_lsm_cache *cache; ++ struct { ++ struct { ++ struct netlbl_lsm_catmap *cat; ++ u32 lvl; ++ } mls; ++ u32 secid; ++ } attr; ++}; ++ ++struct dccp_hdr { ++ __be16 dccph_sport; ++ __be16 dccph_dport; ++ __u8 dccph_doff; ++ __u8 dccph_cscov: 4; ++ __u8 dccph_ccval: 4; ++ __sum16 dccph_checksum; ++ __u8 dccph_x: 1; ++ __u8 dccph_type: 4; ++ __u8 dccph_reserved: 3; ++ __u8 dccph_seq2; ++ __be16 dccph_seq; ++}; ++ ++enum dccp_state { ++ DCCP_OPEN = 1, ++ DCCP_REQUESTING = 2, ++ DCCP_LISTEN = 10, ++ DCCP_RESPOND = 3, ++ DCCP_ACTIVE_CLOSEREQ = 4, ++ DCCP_PASSIVE_CLOSE = 8, ++ DCCP_CLOSING = 11, ++ DCCP_TIME_WAIT = 6, ++ DCCP_CLOSED = 7, ++ DCCP_NEW_SYN_RECV = 12, ++ DCCP_PARTOPEN = 13, ++ DCCP_PASSIVE_CLOSEREQ = 14, ++ DCCP_MAX_STATES = 15, ++}; ++ ++typedef __s32 sctp_assoc_t; ++ ++struct sctp_initmsg { ++ __u16 sinit_num_ostreams; ++ __u16 sinit_max_instreams; ++ __u16 sinit_max_attempts; ++ __u16 sinit_max_init_timeo; ++}; ++ ++struct sctp_sndrcvinfo { ++ __u16 sinfo_stream; ++ __u16 sinfo_ssn; ++ __u16 sinfo_flags; ++ __u32 sinfo_ppid; ++ __u32 sinfo_context; ++ __u32 sinfo_timetolive; ++ __u32 sinfo_tsn; ++ __u32 sinfo_cumtsn; ++ sctp_assoc_t sinfo_assoc_id; ++}; ++ ++struct sctp_event_subscribe { ++ __u8 sctp_data_io_event; ++ __u8 sctp_association_event; ++ __u8 sctp_address_event; ++ __u8 sctp_send_failure_event; ++ __u8 sctp_peer_error_event; ++ __u8 sctp_shutdown_event; ++ __u8 sctp_partial_delivery_event; ++ __u8 sctp_adaptation_layer_event; ++ __u8 sctp_authentication_event; ++ __u8 sctp_sender_dry_event; ++ __u8 sctp_stream_reset_event; ++ __u8 sctp_assoc_reset_event; ++ __u8 sctp_stream_change_event; ++}; ++ ++struct sctp_rtoinfo { ++ sctp_assoc_t srto_assoc_id; ++ __u32 srto_initial; ++ __u32 srto_max; ++ __u32 srto_min; ++}; ++ ++struct sctp_assocparams { ++ sctp_assoc_t sasoc_assoc_id; ++ __u16 sasoc_asocmaxrxt; ++ __u16 sasoc_number_peer_destinations; ++ __u32 sasoc_peer_rwnd; ++ __u32 sasoc_local_rwnd; ++ __u32 sasoc_cookie_life; ++}; ++ ++struct sctp_paddrparams { ++ sctp_assoc_t spp_assoc_id; ++ struct __kernel_sockaddr_storage spp_address; ++ __u32 spp_hbinterval; ++ __u16 spp_pathmaxrxt; ++ __u32 spp_pathmtu; ++ __u32 spp_sackdelay; ++ __u32 spp_flags; ++ __u32 spp_ipv6_flowlabel; ++ __u8 spp_dscp; ++ char: 8; ++} __attribute__((packed)); ++ ++struct sctp_paramhdr { ++ __be16 type; ++ __be16 length; ++}; ++ ++enum sctp_param { ++ SCTP_PARAM_HEARTBEAT_INFO = 256, ++ SCTP_PARAM_IPV4_ADDRESS = 1280, ++ SCTP_PARAM_IPV6_ADDRESS = 1536, ++ SCTP_PARAM_STATE_COOKIE = 1792, ++ SCTP_PARAM_UNRECOGNIZED_PARAMETERS = 2048, ++ SCTP_PARAM_COOKIE_PRESERVATIVE = 2304, ++ SCTP_PARAM_HOST_NAME_ADDRESS = 2816, ++ SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = 3072, ++ SCTP_PARAM_ECN_CAPABLE = 128, ++ SCTP_PARAM_RANDOM = 640, ++ SCTP_PARAM_CHUNKS = 896, ++ SCTP_PARAM_HMAC_ALGO = 1152, ++ SCTP_PARAM_SUPPORTED_EXT = 2176, ++ SCTP_PARAM_FWD_TSN_SUPPORT = 192, ++ SCTP_PARAM_ADD_IP = 448, ++ SCTP_PARAM_DEL_IP = 704, ++ SCTP_PARAM_ERR_CAUSE = 960, 
++ SCTP_PARAM_SET_PRIMARY = 1216, ++ SCTP_PARAM_SUCCESS_REPORT = 1472, ++ SCTP_PARAM_ADAPTATION_LAYER_IND = 1728, ++ SCTP_PARAM_RESET_OUT_REQUEST = 3328, ++ SCTP_PARAM_RESET_IN_REQUEST = 3584, ++ SCTP_PARAM_RESET_TSN_REQUEST = 3840, ++ SCTP_PARAM_RESET_RESPONSE = 4096, ++ SCTP_PARAM_RESET_ADD_OUT_STREAMS = 4352, ++ SCTP_PARAM_RESET_ADD_IN_STREAMS = 4608, ++}; ++ ++struct sctp_datahdr { ++ __be32 tsn; ++ __be16 stream; ++ __be16 ssn; ++ __u32 ppid; ++ __u8 payload[0]; ++}; ++ ++struct sctp_idatahdr { ++ __be32 tsn; ++ __be16 stream; ++ __be16 reserved; ++ __be32 mid; ++ union { ++ __u32 ppid; ++ __be32 fsn; ++ }; ++ __u8 payload[0]; ++}; ++ ++struct sctp_inithdr { ++ __be32 init_tag; ++ __be32 a_rwnd; ++ __be16 num_outbound_streams; ++ __be16 num_inbound_streams; ++ __be32 initial_tsn; ++ __u8 params[0]; ++}; ++ ++struct sctp_init_chunk { ++ struct sctp_chunkhdr chunk_hdr; ++ struct sctp_inithdr init_hdr; ++}; ++ ++struct sctp_ipv4addr_param { ++ struct sctp_paramhdr param_hdr; ++ struct in_addr addr; ++}; ++ ++struct sctp_ipv6addr_param { ++ struct sctp_paramhdr param_hdr; ++ struct in6_addr addr; ++}; ++ ++struct sctp_cookie_preserve_param { ++ struct sctp_paramhdr param_hdr; ++ __be32 lifespan_increment; ++}; ++ ++struct sctp_hostname_param { ++ struct sctp_paramhdr param_hdr; ++ uint8_t hostname[0]; ++}; ++ ++struct sctp_supported_addrs_param { ++ struct sctp_paramhdr param_hdr; ++ __be16 types[0]; ++}; ++ ++struct sctp_adaptation_ind_param { ++ struct sctp_paramhdr param_hdr; ++ __be32 adaptation_ind; ++}; ++ ++struct sctp_supported_ext_param { ++ struct sctp_paramhdr param_hdr; ++ __u8 chunks[0]; ++}; ++ ++struct sctp_random_param { ++ struct sctp_paramhdr param_hdr; ++ __u8 random_val[0]; ++}; ++ ++struct sctp_chunks_param { ++ struct sctp_paramhdr param_hdr; ++ __u8 chunks[0]; ++}; ++ ++struct sctp_hmac_algo_param { ++ struct sctp_paramhdr param_hdr; ++ __be16 hmac_ids[0]; ++}; ++ ++struct sctp_cookie_param { ++ struct sctp_paramhdr p; ++ __u8 body[0]; ++}; ++ ++struct sctp_gap_ack_block { ++ __be16 start; ++ __be16 end; ++}; ++ ++union sctp_sack_variable { ++ struct sctp_gap_ack_block gab; ++ __be32 dup; ++}; ++ ++struct sctp_sackhdr { ++ __be32 cum_tsn_ack; ++ __be32 a_rwnd; ++ __be16 num_gap_ack_blocks; ++ __be16 num_dup_tsns; ++ union sctp_sack_variable variable[0]; ++}; ++ ++struct sctp_heartbeathdr { ++ struct sctp_paramhdr info; ++}; ++ ++struct sctp_shutdownhdr { ++ __be32 cum_tsn_ack; ++}; ++ ++struct sctp_errhdr { ++ __be16 cause; ++ __be16 length; ++ __u8 variable[0]; ++}; ++ ++struct sctp_ecnehdr { ++ __be32 lowest_tsn; ++}; ++ ++struct sctp_cwrhdr { ++ __be32 lowest_tsn; ++}; ++ ++struct sctp_fwdtsn_skip { ++ __be16 stream; ++ __be16 ssn; ++}; ++ ++struct sctp_fwdtsn_hdr { ++ __be32 new_cum_tsn; ++ struct sctp_fwdtsn_skip skip[0]; ++}; ++ ++struct sctp_ifwdtsn_skip { ++ __be16 stream; ++ __u8 reserved; ++ __u8 flags; ++ __be32 mid; ++}; ++ ++struct sctp_ifwdtsn_hdr { ++ __be32 new_cum_tsn; ++ struct sctp_ifwdtsn_skip skip[0]; ++}; ++ ++struct sctp_addip_param { ++ struct sctp_paramhdr param_hdr; ++ __be32 crr_id; ++}; ++ ++struct sctp_addiphdr { ++ __be32 serial; ++ __u8 params[0]; ++}; ++ ++struct sctp_authhdr { ++ __be16 shkey_id; ++ __be16 hmac_id; ++ __u8 hmac[0]; ++}; ++ ++union sctp_addr { ++ struct sockaddr_in v4; ++ struct sockaddr_in6 v6; ++ struct sockaddr sa; ++}; ++ ++struct sctp_cookie { ++ __u32 my_vtag; ++ __u32 peer_vtag; ++ __u32 my_ttag; ++ __u32 peer_ttag; ++ ktime_t expiration; ++ __u16 sinit_num_ostreams; ++ __u16 sinit_max_instreams; ++ __u32 
initial_tsn; ++ union sctp_addr peer_addr; ++ __u16 my_port; ++ __u8 prsctp_capable; ++ __u8 padding; ++ __u32 adaptation_ind; ++ __u8 auth_random[36]; ++ __u8 auth_hmacs[10]; ++ __u8 auth_chunks[20]; ++ __u32 raw_addr_list_len; ++ struct sctp_init_chunk peer_init[0]; ++}; ++ ++struct sctp_tsnmap { ++ long unsigned int *tsn_map; ++ __u32 base_tsn; ++ __u32 cumulative_tsn_ack_point; ++ __u32 max_tsn_seen; ++ __u16 len; ++ __u16 pending_data; ++ __u16 num_dup_tsns; ++ __be32 dup_tsns[16]; ++}; ++ ++struct sctp_inithdr_host { ++ __u32 init_tag; ++ __u32 a_rwnd; ++ __u16 num_outbound_streams; ++ __u16 num_inbound_streams; ++ __u32 initial_tsn; ++}; ++ ++enum sctp_state { ++ SCTP_STATE_CLOSED = 0, ++ SCTP_STATE_COOKIE_WAIT = 1, ++ SCTP_STATE_COOKIE_ECHOED = 2, ++ SCTP_STATE_ESTABLISHED = 3, ++ SCTP_STATE_SHUTDOWN_PENDING = 4, ++ SCTP_STATE_SHUTDOWN_SENT = 5, ++ SCTP_STATE_SHUTDOWN_RECEIVED = 6, ++ SCTP_STATE_SHUTDOWN_ACK_SENT = 7, ++}; ++ ++struct sctp_stream_out; ++ ++struct sctp_stream_out_ext; ++ ++struct sctp_stream_interleave; ++ ++struct sctp_stream { ++ struct flex_array *out; ++ struct flex_array *in; ++ __u16 outcnt; ++ __u16 incnt; ++ struct sctp_stream_out *out_curr; ++ union { ++ struct { ++ struct list_head prio_list; ++ }; ++ struct { ++ struct list_head rr_list; ++ struct sctp_stream_out_ext *rr_next; ++ }; ++ }; ++ struct sctp_stream_interleave *si; ++}; ++ ++struct sctp_sched_ops; ++ ++struct sctp_association; ++ ++struct sctp_outq { ++ struct sctp_association *asoc; ++ struct list_head out_chunk_list; ++ struct sctp_sched_ops *sched; ++ unsigned int out_qlen; ++ unsigned int error; ++ struct list_head control_chunk_list; ++ struct list_head sacked; ++ struct list_head retransmit; ++ struct list_head abandoned; ++ __u32 outstanding_bytes; ++ char fast_rtx; ++ char cork; ++}; ++ ++struct sctp_ulpq { ++ char pd_mode; ++ struct sctp_association *asoc; ++ struct sk_buff_head reasm; ++ struct sk_buff_head reasm_uo; ++ struct sk_buff_head lobby; ++}; ++ ++struct sctp_priv_assoc_stats { ++ struct __kernel_sockaddr_storage obs_rto_ipaddr; ++ __u64 max_obs_rto; ++ __u64 isacks; ++ __u64 osacks; ++ __u64 opackets; ++ __u64 ipackets; ++ __u64 rtxchunks; ++ __u64 outofseqtsns; ++ __u64 idupchunks; ++ __u64 gapcnt; ++ __u64 ouodchunks; ++ __u64 iuodchunks; ++ __u64 oodchunks; ++ __u64 iodchunks; ++ __u64 octrlchunks; ++ __u64 ictrlchunks; ++}; ++ ++struct sctp_transport; ++ ++struct sctp_auth_bytes; ++ ++struct sctp_shared_key; ++ ++struct sctp_association { ++ struct sctp_ep_common base; ++ struct list_head asocs; ++ sctp_assoc_t assoc_id; ++ struct sctp_endpoint *ep; ++ struct sctp_cookie c; ++ struct { ++ struct list_head transport_addr_list; ++ __u32 rwnd; ++ __u16 transport_count; ++ __u16 port; ++ struct sctp_transport *primary_path; ++ union sctp_addr primary_addr; ++ struct sctp_transport *active_path; ++ struct sctp_transport *retran_path; ++ struct sctp_transport *last_sent_to; ++ struct sctp_transport *last_data_from; ++ struct sctp_tsnmap tsn_map; ++ __be16 addip_disabled_mask; ++ __u8 ecn_capable: 1; ++ __u8 ipv4_address: 1; ++ __u8 ipv6_address: 1; ++ __u8 hostname_address: 1; ++ __u8 asconf_capable: 1; ++ __u8 prsctp_capable: 1; ++ __u8 reconf_capable: 1; ++ __u8 auth_capable: 1; ++ __u8 sack_needed: 1; ++ __u8 sack_generation: 1; ++ __u8 zero_window_announced: 1; ++ __u32 sack_cnt; ++ __u32 adaptation_ind; ++ struct sctp_inithdr_host i; ++ void *cookie; ++ int cookie_len; ++ __u32 addip_serial; ++ struct sctp_random_param *peer_random; ++ struct sctp_chunks_param 
*peer_chunks; ++ struct sctp_hmac_algo_param *peer_hmacs; ++ } peer; ++ enum sctp_state state; ++ int overall_error_count; ++ ktime_t cookie_life; ++ long unsigned int rto_initial; ++ long unsigned int rto_max; ++ long unsigned int rto_min; ++ int max_burst; ++ int max_retrans; ++ int pf_retrans; ++ __u16 max_init_attempts; ++ __u16 init_retries; ++ long unsigned int max_init_timeo; ++ long unsigned int hbinterval; ++ __u16 pathmaxrxt; ++ __u32 flowlabel; ++ __u8 dscp; ++ __u8 pmtu_pending; ++ __u32 pathmtu; ++ __u32 param_flags; ++ __u32 sackfreq; ++ long unsigned int sackdelay; ++ long unsigned int timeouts[11]; ++ struct timer_list timers[11]; ++ struct sctp_transport *shutdown_last_sent_to; ++ struct sctp_transport *init_last_sent_to; ++ int shutdown_retries; ++ __u32 next_tsn; ++ __u32 ctsn_ack_point; ++ __u32 adv_peer_ack_point; ++ __u32 highest_sacked; ++ __u32 fast_recovery_exit; ++ __u8 fast_recovery; ++ __u16 unack_data; ++ __u32 rtx_data_chunks; ++ __u32 rwnd; ++ __u32 a_rwnd; ++ __u32 rwnd_over; ++ __u32 rwnd_press; ++ int sndbuf_used; ++ atomic_t rmem_alloc; ++ wait_queue_head_t wait; ++ __u32 frag_point; ++ __u32 user_frag; ++ int init_err_counter; ++ int init_cycle; ++ __u16 default_stream; ++ __u16 default_flags; ++ __u32 default_ppid; ++ __u32 default_context; ++ __u32 default_timetolive; ++ __u32 default_rcv_context; ++ struct sctp_stream stream; ++ struct sctp_outq outqueue; ++ struct sctp_ulpq ulpq; ++ __u32 last_ecne_tsn; ++ __u32 last_cwr_tsn; ++ int numduptsns; ++ struct sctp_chunk *addip_last_asconf; ++ struct list_head asconf_ack_list; ++ struct list_head addip_chunk_list; ++ __u32 addip_serial; ++ int src_out_of_asoc_ok; ++ union sctp_addr *asconf_addr_del_pending; ++ struct sctp_transport *new_transport; ++ struct list_head endpoint_shared_keys; ++ struct sctp_auth_bytes *asoc_shared_key; ++ struct sctp_shared_key *shkey; ++ __u16 default_hmac_id; ++ __u16 active_key_id; ++ __u8 need_ecne: 1; ++ __u8 temp: 1; ++ __u8 force_delay: 1; ++ __u8 intl_enable: 1; ++ __u8 prsctp_enable: 1; ++ __u8 reconf_enable: 1; ++ __u8 strreset_enable; ++ __u8 strreset_outstanding; ++ __u32 strreset_outseq; ++ __u32 strreset_inseq; ++ __u32 strreset_result[2]; ++ struct sctp_chunk *strreset_chunk; ++ struct sctp_priv_assoc_stats stats; ++ int sent_cnt_removable; ++ __u64 abandoned_unsent[3]; ++ __u64 abandoned_sent[3]; ++ struct callback_head rcu; ++}; ++ ++struct sctp_auth_bytes { ++ refcount_t refcnt; ++ __u32 len; ++ __u8 data[0]; ++}; ++ ++struct sctp_shared_key { ++ struct list_head key_list; ++ struct sctp_auth_bytes *key; ++ refcount_t refcnt; ++ __u16 key_id; ++ __u8 deactivated; ++}; ++ ++enum sctp_scope { ++ SCTP_SCOPE_GLOBAL = 0, ++ SCTP_SCOPE_PRIVATE = 1, ++ SCTP_SCOPE_LINK = 2, ++ SCTP_SCOPE_LOOPBACK = 3, ++ SCTP_SCOPE_UNUSABLE = 4, ++}; ++ ++struct sctp_ulpevent { ++ struct sctp_association *asoc; ++ struct sctp_chunk *chunk; ++ unsigned int rmem_len; ++ union { ++ __u32 mid; ++ __u16 ssn; ++ }; ++ union { ++ __u32 ppid; ++ __u32 fsn; ++ }; ++ __u32 tsn; ++ __u32 cumtsn; ++ __u16 stream; ++ __u16 flags; ++ __u16 msg_flags; ++} __attribute__((packed)); ++ ++union sctp_addr_param; ++ ++union sctp_params { ++ void *v; ++ struct sctp_paramhdr *p; ++ struct sctp_cookie_preserve_param *life; ++ struct sctp_hostname_param *dns; ++ struct sctp_cookie_param *cookie; ++ struct sctp_supported_addrs_param *sat; ++ struct sctp_ipv4addr_param *v4; ++ struct sctp_ipv6addr_param *v6; ++ union sctp_addr_param *addr; ++ struct sctp_adaptation_ind_param *aind; ++ struct 
sctp_supported_ext_param *ext; ++ struct sctp_random_param *random; ++ struct sctp_chunks_param *chunks; ++ struct sctp_hmac_algo_param *hmac_algo; ++ struct sctp_addip_param *addip; ++}; ++ ++struct sctp_sender_hb_info; ++ ++struct sctp_signed_cookie; ++ ++struct sctp_datamsg; ++ ++struct sctp_chunk { ++ struct list_head list; ++ refcount_t refcnt; ++ int sent_count; ++ union { ++ struct list_head transmitted_list; ++ struct list_head stream_list; ++ }; ++ struct list_head frag_list; ++ struct sk_buff *skb; ++ union { ++ struct sk_buff *head_skb; ++ struct sctp_shared_key *shkey; ++ }; ++ union sctp_params param_hdr; ++ union { ++ __u8 *v; ++ struct sctp_datahdr *data_hdr; ++ struct sctp_inithdr *init_hdr; ++ struct sctp_sackhdr *sack_hdr; ++ struct sctp_heartbeathdr *hb_hdr; ++ struct sctp_sender_hb_info *hbs_hdr; ++ struct sctp_shutdownhdr *shutdown_hdr; ++ struct sctp_signed_cookie *cookie_hdr; ++ struct sctp_ecnehdr *ecne_hdr; ++ struct sctp_cwrhdr *ecn_cwr_hdr; ++ struct sctp_errhdr *err_hdr; ++ struct sctp_addiphdr *addip_hdr; ++ struct sctp_fwdtsn_hdr *fwdtsn_hdr; ++ struct sctp_authhdr *auth_hdr; ++ struct sctp_idatahdr *idata_hdr; ++ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; ++ } subh; ++ __u8 *chunk_end; ++ struct sctp_chunkhdr *chunk_hdr; ++ struct sctphdr *sctp_hdr; ++ struct sctp_sndrcvinfo sinfo; ++ struct sctp_association *asoc; ++ struct sctp_ep_common *rcvr; ++ long unsigned int sent_at; ++ union sctp_addr source; ++ union sctp_addr dest; ++ struct sctp_datamsg *msg; ++ struct sctp_transport *transport; ++ struct sk_buff *auth_chunk; ++ __u16 rtt_in_progress: 1; ++ __u16 has_tsn: 1; ++ __u16 has_ssn: 1; ++ __u16 singleton: 1; ++ __u16 end_of_packet: 1; ++ __u16 ecn_ce_done: 1; ++ __u16 pdiscard: 1; ++ __u16 tsn_gap_acked: 1; ++ __u16 data_accepted: 1; ++ __u16 auth: 1; ++ __u16 has_asconf: 1; ++ __u16 tsn_missing_report: 2; ++ __u16 fast_retransmit: 2; ++}; ++ ++struct sctp_stream_interleave { ++ __u16 data_chunk_len; ++ __u16 ftsn_chunk_len; ++ struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t); ++ void (*assign_number)(struct sctp_chunk *); ++ bool (*validate_data)(struct sctp_chunk *); ++ int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); ++ int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *); ++ void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); ++ void (*start_pd)(struct sctp_ulpq *, gfp_t); ++ void (*abort_pd)(struct sctp_ulpq *, gfp_t); ++ void (*generate_ftsn)(struct sctp_outq *, __u32); ++ bool (*validate_ftsn)(struct sctp_chunk *); ++ void (*report_ftsn)(struct sctp_ulpq *, __u32); ++ void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *); ++}; ++ ++struct sctp_bind_bucket { ++ short unsigned int port; ++ short unsigned int fastreuse; ++ struct hlist_node node; ++ struct hlist_head owner; ++ struct net *net; ++}; ++ ++enum sctp_socket_type { ++ SCTP_SOCKET_UDP = 0, ++ SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1, ++ SCTP_SOCKET_TCP = 2, ++}; ++ ++struct sctp_pf; ++ ++struct sctp_sock { ++ struct inet_sock inet; ++ enum sctp_socket_type type; ++ struct sctp_pf *pf; ++ struct crypto_shash *hmac; ++ char *sctp_hmac_alg; ++ struct sctp_endpoint *ep; ++ struct sctp_bind_bucket *bind_hash; ++ __u16 default_stream; ++ __u32 default_ppid; ++ __u16 default_flags; ++ __u32 default_context; ++ __u32 default_timetolive; ++ __u32 default_rcv_context; ++ int max_burst; ++ __u32 hbinterval; ++ __u16 pathmaxrxt; ++ __u32 flowlabel; ++ __u8 dscp; ++ __u32 pathmtu; ++ 
__u32 sackdelay; ++ __u32 sackfreq; ++ __u32 param_flags; ++ struct sctp_rtoinfo rtoinfo; ++ struct sctp_paddrparams paddrparam; ++ struct sctp_assocparams assocparams; ++ struct sctp_event_subscribe subscribe; ++ struct sctp_initmsg initmsg; ++ int user_frag; ++ __u32 autoclose; ++ __u32 adaptation_ind; ++ __u32 pd_point; ++ __u16 nodelay: 1; ++ __u16 reuse: 1; ++ __u16 disable_fragments: 1; ++ __u16 v4mapped: 1; ++ __u16 frag_interleave: 1; ++ __u16 strm_interleave: 1; ++ __u16 recvrcvinfo: 1; ++ __u16 recvnxtinfo: 1; ++ __u16 data_ready_signalled: 1; ++ atomic_t pd_mode; ++ struct sk_buff_head pd_lobby; ++ struct list_head auto_asconf_list; ++ int do_auto_asconf; ++}; ++ ++struct sctp_af; ++ ++struct sctp_pf { ++ void (*event_msgname)(struct sctp_ulpevent *, char *, int *); ++ void (*skb_msgname)(struct sk_buff *, char *, int *); ++ int (*af_supported)(sa_family_t, struct sctp_sock *); ++ int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *); ++ int (*bind_verify)(struct sctp_sock *, union sctp_addr *); ++ int (*send_verify)(struct sctp_sock *, union sctp_addr *); ++ int (*supported_addrs)(const struct sctp_sock *, __be16 *); ++ struct sock * (*create_accept_sk)(struct sock *, struct sctp_association *, bool); ++ int (*addr_to_user)(struct sctp_sock *, union sctp_addr *); ++ void (*to_sk_saddr)(union sctp_addr *, struct sock *); ++ void (*to_sk_daddr)(union sctp_addr *, struct sock *); ++ void (*copy_ip_options)(struct sock *, struct sock *); ++ struct sctp_af *af; ++}; ++ ++struct sctp_signed_cookie { ++ __u8 signature[32]; ++ __u32 __pad; ++ struct sctp_cookie c; ++} __attribute__((packed)); ++ ++union sctp_addr_param { ++ struct sctp_paramhdr p; ++ struct sctp_ipv4addr_param v4; ++ struct sctp_ipv6addr_param v6; ++}; ++ ++struct sctp_sender_hb_info { ++ struct sctp_paramhdr param_hdr; ++ union sctp_addr daddr; ++ long unsigned int sent_at; ++ __u64 hb_nonce; ++}; ++ ++struct sctp_af { ++ int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *); ++ int (*setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*getsockopt)(struct sock *, int, int, char *, int *); ++ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*compat_getsockopt)(struct sock *, int, int, char *, int *); ++ void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *); ++ void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *); ++ void (*copy_addrlist)(struct list_head *, struct net_device *); ++ int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *); ++ void (*addr_copy)(union sctp_addr *, union sctp_addr *); ++ void (*from_skb)(union sctp_addr *, struct sk_buff *, int); ++ void (*from_sk)(union sctp_addr *, struct sock *); ++ void (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int); ++ int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *); ++ int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *); ++ enum sctp_scope (*scope)(union sctp_addr *); ++ void (*inaddr_any)(union sctp_addr *, __be16); ++ int (*is_any)(const union sctp_addr *); ++ int (*available)(union sctp_addr *, struct sctp_sock *); ++ int (*skb_iif)(const struct sk_buff *); ++ int (*is_ce)(const struct sk_buff *); ++ void (*seq_dump_addr)(struct seq_file *, union sctp_addr *); ++ void (*ecn_capable)(struct sock *); ++ __u16 net_header_len; ++ int sockaddr_len; ++ int (*ip_options_len)(struct sock *); ++ sa_family_t sa_family; ++ struct list_head 
list; ++}; ++ ++struct sctp_packet { ++ __u16 source_port; ++ __u16 destination_port; ++ __u32 vtag; ++ struct list_head chunk_list; ++ size_t overhead; ++ size_t size; ++ size_t max_size; ++ struct sctp_transport *transport; ++ struct sctp_chunk *auth; ++ u8 has_cookie_echo: 1; ++ u8 has_sack: 1; ++ u8 has_auth: 1; ++ u8 has_data: 1; ++ u8 ipfragok: 1; ++}; ++ ++struct sctp_transport { ++ struct list_head transports; ++ struct rhlist_head node; ++ refcount_t refcnt; ++ __u32 rto_pending: 1; ++ __u32 hb_sent: 1; ++ __u32 pmtu_pending: 1; ++ __u32 dst_pending_confirm: 1; ++ __u32 sack_generation: 1; ++ u32 dst_cookie; ++ struct flowi fl; ++ union sctp_addr ipaddr; ++ struct sctp_af *af_specific; ++ struct sctp_association *asoc; ++ long unsigned int rto; ++ __u32 rtt; ++ __u32 rttvar; ++ __u32 srtt; ++ __u32 cwnd; ++ __u32 ssthresh; ++ __u32 partial_bytes_acked; ++ __u32 flight_size; ++ __u32 burst_limited; ++ struct dst_entry *dst; ++ union sctp_addr saddr; ++ long unsigned int hbinterval; ++ long unsigned int sackdelay; ++ __u32 sackfreq; ++ atomic_t mtu_info; ++ ktime_t last_time_heard; ++ long unsigned int last_time_sent; ++ long unsigned int last_time_ecne_reduced; ++ __u16 pathmaxrxt; ++ __u32 flowlabel; ++ __u8 dscp; ++ int pf_retrans; ++ __u32 pathmtu; ++ __u32 param_flags; ++ int init_sent_count; ++ int state; ++ short unsigned int error_count; ++ struct timer_list T3_rtx_timer; ++ struct timer_list hb_timer; ++ struct timer_list proto_unreach_timer; ++ struct timer_list reconf_timer; ++ struct list_head transmitted; ++ struct sctp_packet packet; ++ struct list_head send_ready; ++ struct { ++ __u32 next_tsn_at_change; ++ char changeover_active; ++ char cycling_changeover; ++ char cacc_saw_newack; ++ } cacc; ++ __u64 hb_nonce; ++ struct callback_head rcu; ++}; ++ ++struct sctp_datamsg { ++ struct list_head chunks; ++ refcount_t refcnt; ++ long unsigned int expires_at; ++ int send_error; ++ u8 send_failed: 1; ++ u8 can_delay: 1; ++ u8 abandoned: 1; ++}; ++ ++struct sctp_stream_priorities { ++ struct list_head prio_sched; ++ struct list_head active; ++ struct sctp_stream_out_ext *next; ++ __u16 prio; ++}; ++ ++struct sctp_stream_out_ext { ++ __u64 abandoned_unsent[3]; ++ __u64 abandoned_sent[3]; ++ struct list_head outq; ++ union { ++ struct { ++ struct list_head prio_list; ++ struct sctp_stream_priorities *prio_head; ++ }; ++ struct { ++ struct list_head rr_list; ++ }; ++ }; ++}; ++ ++struct sctp_stream_out { ++ union { ++ __u32 mid; ++ __u16 ssn; ++ }; ++ __u32 mid_uo; ++ struct sctp_stream_out_ext *ext; ++ __u8 state; ++}; ++ ++struct task_security_struct { ++ u32 osid; ++ u32 sid; ++ u32 exec_sid; ++ u32 create_sid; ++ u32 keycreate_sid; ++ u32 sockcreate_sid; ++}; ++ ++enum label_initialized { ++ LABEL_INVALID = 0, ++ LABEL_INITIALIZED = 1, ++ LABEL_PENDING = 2, ++}; ++ ++struct inode_security_struct { ++ struct inode *inode; ++ union { ++ struct list_head list; ++ struct callback_head rcu; ++ }; ++ u32 task_sid; ++ u32 sid; ++ u16 sclass; ++ unsigned char initialized; ++ spinlock_t lock; ++}; ++ ++struct file_security_struct { ++ u32 sid; ++ u32 fown_sid; ++ u32 isid; ++ u32 pseqno; ++}; ++ ++struct superblock_security_struct { ++ struct super_block *sb; ++ u32 sid; ++ u32 def_sid; ++ u32 mntpoint_sid; ++ short unsigned int behavior; ++ short unsigned int flags; ++ struct mutex lock; ++ struct list_head isec_head; ++ spinlock_t isec_lock; ++}; ++ ++struct msg_security_struct { ++ u32 sid; ++}; ++ ++struct ipc_security_struct { ++ u16 sclass; ++ u32 sid; ++}; ++ ++struct 
sk_security_struct { ++ enum { ++ NLBL_UNSET = 0, ++ NLBL_REQUIRE = 1, ++ NLBL_LABELED = 2, ++ NLBL_REQSKB = 3, ++ NLBL_CONNLABELED = 4, ++ } nlbl_state; ++ struct netlbl_lsm_secattr *nlbl_secattr; ++ u32 sid; ++ u32 peer_sid; ++ u16 sclass; ++ enum { ++ SCTP_ASSOC_UNSET = 0, ++ SCTP_ASSOC_SET = 1, ++ } sctp_assoc_state; ++}; ++ ++struct tun_security_struct { ++ u32 sid; ++}; ++ ++struct key_security_struct { ++ u32 sid; ++}; ++ ++struct ib_security_struct { ++ u32 sid; ++}; ++ ++struct bpf_security_struct { ++ u32 sid; ++}; ++ ++enum { ++ Opt_error = 4294967295, ++ Opt_context = 1, ++ Opt_fscontext = 2, ++ Opt_defcontext = 3, ++ Opt_rootcontext = 4, ++ Opt_labelsupport = 5, ++ Opt_nextmntopt = 6, ++}; ++ ++enum sel_inos { ++ SEL_ROOT_INO = 2, ++ SEL_LOAD = 3, ++ SEL_ENFORCE = 4, ++ SEL_CONTEXT = 5, ++ SEL_ACCESS = 6, ++ SEL_CREATE = 7, ++ SEL_RELABEL = 8, ++ SEL_USER = 9, ++ SEL_POLICYVERS = 10, ++ SEL_COMMIT_BOOLS = 11, ++ SEL_MLS = 12, ++ SEL_DISABLE = 13, ++ SEL_MEMBER = 14, ++ SEL_CHECKREQPROT = 15, ++ SEL_COMPAT_NET = 16, ++ SEL_REJECT_UNKNOWN = 17, ++ SEL_DENY_UNKNOWN = 18, ++ SEL_STATUS = 19, ++ SEL_POLICY = 20, ++ SEL_VALIDATE_TRANS = 21, ++ SEL_INO_NEXT = 22, ++}; ++ ++struct selinux_fs_info { ++ struct dentry *bool_dir; ++ unsigned int bool_num; ++ char **bool_pending_names; ++ unsigned int *bool_pending_values; ++ struct dentry *class_dir; ++ long unsigned int last_class_ino; ++ bool policy_opened; ++ struct dentry *policycap_dir; ++ struct mutex mutex; ++ long unsigned int last_ino; ++ struct selinux_state *state; ++ struct super_block *sb; ++}; ++ ++struct policy_load_memory { ++ size_t len; ++ void *data; ++}; ++ ++enum { ++ SELNL_MSG_SETENFORCE = 16, ++ SELNL_MSG_POLICYLOAD = 17, ++ SELNL_MSG_MAX = 18, ++}; ++ ++enum selinux_nlgroups { ++ SELNLGRP_NONE = 0, ++ SELNLGRP_AVC = 1, ++ __SELNLGRP_MAX = 2, ++}; ++ ++struct selnl_msg_setenforce { ++ __s32 val; ++}; ++ ++struct selnl_msg_policyload { ++ __u32 seqno; ++}; ++ ++enum { ++ XFRM_MSG_BASE = 16, ++ XFRM_MSG_NEWSA = 16, ++ XFRM_MSG_DELSA = 17, ++ XFRM_MSG_GETSA = 18, ++ XFRM_MSG_NEWPOLICY = 19, ++ XFRM_MSG_DELPOLICY = 20, ++ XFRM_MSG_GETPOLICY = 21, ++ XFRM_MSG_ALLOCSPI = 22, ++ XFRM_MSG_ACQUIRE = 23, ++ XFRM_MSG_EXPIRE = 24, ++ XFRM_MSG_UPDPOLICY = 25, ++ XFRM_MSG_UPDSA = 26, ++ XFRM_MSG_POLEXPIRE = 27, ++ XFRM_MSG_FLUSHSA = 28, ++ XFRM_MSG_FLUSHPOLICY = 29, ++ XFRM_MSG_NEWAE = 30, ++ XFRM_MSG_GETAE = 31, ++ XFRM_MSG_REPORT = 32, ++ XFRM_MSG_MIGRATE = 33, ++ XFRM_MSG_NEWSADINFO = 34, ++ XFRM_MSG_GETSADINFO = 35, ++ XFRM_MSG_NEWSPDINFO = 36, ++ XFRM_MSG_GETSPDINFO = 37, ++ XFRM_MSG_MAPPING = 38, ++ __XFRM_MSG_MAX = 39, ++}; ++ ++enum { ++ RTM_BASE = 16, ++ RTM_NEWLINK = 16, ++ RTM_DELLINK = 17, ++ RTM_GETLINK = 18, ++ RTM_SETLINK = 19, ++ RTM_NEWADDR = 20, ++ RTM_DELADDR = 21, ++ RTM_GETADDR = 22, ++ RTM_NEWROUTE = 24, ++ RTM_DELROUTE = 25, ++ RTM_GETROUTE = 26, ++ RTM_NEWNEIGH = 28, ++ RTM_DELNEIGH = 29, ++ RTM_GETNEIGH = 30, ++ RTM_NEWRULE = 32, ++ RTM_DELRULE = 33, ++ RTM_GETRULE = 34, ++ RTM_NEWQDISC = 36, ++ RTM_DELQDISC = 37, ++ RTM_GETQDISC = 38, ++ RTM_NEWTCLASS = 40, ++ RTM_DELTCLASS = 41, ++ RTM_GETTCLASS = 42, ++ RTM_NEWTFILTER = 44, ++ RTM_DELTFILTER = 45, ++ RTM_GETTFILTER = 46, ++ RTM_NEWACTION = 48, ++ RTM_DELACTION = 49, ++ RTM_GETACTION = 50, ++ RTM_NEWPREFIX = 52, ++ RTM_GETMULTICAST = 58, ++ RTM_GETANYCAST = 62, ++ RTM_NEWNEIGHTBL = 64, ++ RTM_GETNEIGHTBL = 66, ++ RTM_SETNEIGHTBL = 67, ++ RTM_NEWNDUSEROPT = 68, ++ RTM_NEWADDRLABEL = 72, ++ RTM_DELADDRLABEL = 73, ++ RTM_GETADDRLABEL = 74, ++ RTM_GETDCB = 
78, ++ RTM_SETDCB = 79, ++ RTM_NEWNETCONF = 80, ++ RTM_DELNETCONF = 81, ++ RTM_GETNETCONF = 82, ++ RTM_NEWMDB = 84, ++ RTM_DELMDB = 85, ++ RTM_GETMDB = 86, ++ RTM_NEWNSID = 88, ++ RTM_DELNSID = 89, ++ RTM_GETNSID = 90, ++ RTM_NEWSTATS = 92, ++ RTM_GETSTATS = 94, ++ RTM_NEWCACHEREPORT = 96, ++ RTM_NEWCHAIN = 100, ++ RTM_DELCHAIN = 101, ++ RTM_GETCHAIN = 102, ++ __RTM_MAX = 103, ++}; ++ ++struct nlmsg_perm { ++ u16 nlmsg_type; ++ u32 perm; ++}; ++ ++struct netif_security_struct { ++ struct net *ns; ++ int ifindex; ++ u32 sid; ++}; ++ ++struct sel_netif { ++ struct list_head list; ++ struct netif_security_struct nsec; ++ struct callback_head callback_head; ++}; ++ ++struct netnode_security_struct { ++ union { ++ __be32 ipv4; ++ struct in6_addr ipv6; ++ } addr; ++ u32 sid; ++ u16 family; ++}; ++ ++struct sel_netnode_bkt { ++ unsigned int size; ++ struct list_head list; ++}; ++ ++struct sel_netnode { ++ struct netnode_security_struct nsec; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct netport_security_struct { ++ u32 sid; ++ u16 port; ++ u8 protocol; ++}; ++ ++struct sel_netport_bkt { ++ int size; ++ struct list_head list; ++}; ++ ++struct sel_netport { ++ struct netport_security_struct psec; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct pkey_security_struct { ++ u64 subnet_prefix; ++ u16 pkey; ++ u32 sid; ++}; ++ ++struct sel_ib_pkey_bkt { ++ int size; ++ struct list_head list; ++}; ++ ++struct sel_ib_pkey { ++ struct pkey_security_struct psec; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct path___3; ++ ++struct ebitmap_node { ++ struct ebitmap_node *next; ++ long unsigned int maps[6]; ++ u32 startbit; ++}; ++ ++struct ebitmap { ++ struct ebitmap_node *node; ++ u32 highbit; ++}; ++ ++struct policy_file { ++ char *data; ++ size_t len; ++}; ++ ++struct hashtab_node { ++ void *key; ++ void *datum; ++ struct hashtab_node *next; ++}; ++ ++struct hashtab { ++ struct hashtab_node **htable; ++ u32 size; ++ u32 nel; ++ u32 (*hash_value)(struct hashtab *, const void *); ++ int (*keycmp)(struct hashtab *, const void *, const void *); ++}; ++ ++struct hashtab_info { ++ u32 slots_used; ++ u32 max_chain_len; ++}; ++ ++struct symtab { ++ struct hashtab *table; ++ u32 nprim; ++}; ++ ++struct mls_level { ++ u32 sens; ++ struct ebitmap cat; ++}; ++ ++struct mls_range { ++ struct mls_level level[2]; ++}; ++ ++struct context { ++ u32 user; ++ u32 role; ++ u32 type; ++ u32 len; ++ struct mls_range range; ++ char *str; ++}; ++ ++struct sidtab_node { ++ u32 sid; ++ struct context context; ++ struct sidtab_node *next; ++}; ++ ++struct sidtab { ++ struct sidtab_node **htable; ++ unsigned int nel; ++ unsigned int next_sid; ++ unsigned char shutdown; ++ struct sidtab_node *cache[3]; ++ spinlock_t lock; ++}; ++ ++typedef u16 uint16_t; ++ ++struct avtab_key { ++ u16 source_type; ++ u16 target_type; ++ u16 target_class; ++ u16 specified; ++}; ++ ++struct avtab_extended_perms { ++ u8 specified; ++ u8 driver; ++ struct extended_perms_data perms; ++}; ++ ++struct avtab_datum { ++ union { ++ u32 data; ++ struct avtab_extended_perms *xperms; ++ } u; ++}; ++ ++struct avtab_node { ++ struct avtab_key key; ++ struct avtab_datum datum; ++ struct avtab_node *next; ++}; ++ ++struct avtab { ++ struct flex_array *htable; ++ u32 nel; ++ u32 nslot; ++ u32 mask; ++}; ++ ++struct type_set; ++ ++struct constraint_expr { ++ u32 expr_type; ++ u32 attr; ++ u32 op; ++ struct ebitmap names; ++ struct type_set *type_names; ++ struct constraint_expr *next; ++}; ++ 
++struct type_set { ++ struct ebitmap types; ++ struct ebitmap negset; ++ u32 flags; ++}; ++ ++struct constraint_node { ++ u32 permissions; ++ struct constraint_expr *expr; ++ struct constraint_node *next; ++}; ++ ++struct common_datum { ++ u32 value; ++ struct symtab permissions; ++}; ++ ++struct class_datum { ++ u32 value; ++ char *comkey; ++ struct common_datum *comdatum; ++ struct symtab permissions; ++ struct constraint_node *constraints; ++ struct constraint_node *validatetrans; ++ char default_user; ++ char default_role; ++ char default_type; ++ char default_range; ++}; ++ ++struct role_datum { ++ u32 value; ++ u32 bounds; ++ struct ebitmap dominates; ++ struct ebitmap types; ++}; ++ ++struct role_trans { ++ u32 role; ++ u32 type; ++ u32 tclass; ++ u32 new_role; ++ struct role_trans *next; ++}; ++ ++struct role_allow { ++ u32 role; ++ u32 new_role; ++ struct role_allow *next; ++}; ++ ++struct user_datum { ++ u32 value; ++ u32 bounds; ++ struct ebitmap roles; ++ struct mls_range range; ++ struct mls_level dfltlevel; ++}; ++ ++struct cond_bool_datum { ++ __u32 value; ++ int state; ++}; ++ ++struct ocontext { ++ union { ++ char *name; ++ struct { ++ u8 protocol; ++ u16 low_port; ++ u16 high_port; ++ } port; ++ struct { ++ u32 addr; ++ u32 mask; ++ } node; ++ struct { ++ u32 addr[4]; ++ u32 mask[4]; ++ } node6; ++ struct { ++ u64 subnet_prefix; ++ u16 low_pkey; ++ u16 high_pkey; ++ } ibpkey; ++ struct { ++ char *dev_name; ++ u8 port; ++ } ibendport; ++ } u; ++ union { ++ u32 sclass; ++ u32 behavior; ++ } v; ++ struct context context[2]; ++ u32 sid[2]; ++ struct ocontext *next; ++}; ++ ++struct genfs { ++ char *fstype; ++ struct ocontext *head; ++ struct genfs *next; ++}; ++ ++struct cond_node; ++ ++struct policydb { ++ int mls_enabled; ++ struct symtab symtab[8]; ++ struct flex_array *sym_val_to_name[8]; ++ struct class_datum **class_val_to_struct; ++ struct role_datum **role_val_to_struct; ++ struct user_datum **user_val_to_struct; ++ struct flex_array *type_val_to_struct_array; ++ struct avtab te_avtab; ++ struct role_trans *role_tr; ++ struct ebitmap filename_trans_ttypes; ++ struct hashtab *filename_trans; ++ struct cond_bool_datum **bool_val_to_struct; ++ struct avtab te_cond_avtab; ++ struct cond_node *cond_list; ++ struct role_allow *role_allow; ++ struct ocontext *ocontexts[9]; ++ struct genfs *genfs; ++ struct hashtab *range_tr; ++ struct flex_array *type_attr_map_array; ++ struct ebitmap policycaps; ++ struct ebitmap permissive_map; ++ size_t len; ++ unsigned int policyvers; ++ unsigned int reject_unknown: 1; ++ unsigned int allow_unknown: 1; ++ u16 process_class; ++ u32 process_trans_perms; ++}; ++ ++struct selinux_mapping; ++ ++struct selinux_map { ++ struct selinux_mapping *mapping; ++ u16 size; ++}; ++ ++struct selinux_ss { ++ struct sidtab sidtab; ++ struct policydb policydb; ++ rwlock_t policy_rwlock; ++ u32 latest_granting; ++ struct selinux_map map; ++ struct page *status_page; ++ struct mutex status_lock; ++}; ++ ++struct perm_datum { ++ u32 value; ++}; ++ ++struct filename_trans { ++ u32 stype; ++ u32 ttype; ++ u16 tclass; ++ const char *name; ++}; ++ ++struct filename_trans_datum { ++ u32 otype; ++}; ++ ++struct type_datum { ++ u32 value; ++ u32 bounds; ++ unsigned char primary; ++ unsigned char attribute; ++}; ++ ++struct level_datum { ++ struct mls_level *level; ++ unsigned char isalias; ++}; ++ ++struct cat_datum { ++ u32 value; ++ unsigned char isalias; ++}; ++ ++struct range_trans { ++ u32 source_type; ++ u32 target_type; ++ u32 target_class; ++}; ++ ++struct 
cond_expr; ++ ++struct cond_av_list; ++ ++struct cond_node { ++ int cur_state; ++ struct cond_expr *expr; ++ struct cond_av_list *true_list; ++ struct cond_av_list *false_list; ++ struct cond_node *next; ++}; ++ ++struct policy_data { ++ struct policydb *p; ++ void *fp; ++}; ++ ++struct cond_expr { ++ __u32 expr_type; ++ __u32 bool; ++ struct cond_expr *next; ++}; ++ ++struct cond_av_list { ++ struct avtab_node *node; ++ struct cond_av_list *next; ++}; ++ ++struct selinux_mapping { ++ u16 value; ++ unsigned int num_perms; ++ u32 perms[32]; ++}; ++ ++struct policydb_compat_info { ++ int version; ++ int sym_num; ++ int ocon_num; ++}; ++ ++struct convert_context_args { ++ struct selinux_state *state; ++ struct policydb *oldp; ++ struct policydb *newp; ++}; ++ ++struct selinux_audit_rule { ++ u32 au_seqno; ++ struct context au_ctxt; ++}; ++ ++struct cond_insertf_data { ++ struct policydb *p; ++ struct cond_av_list *other; ++ struct cond_av_list *head; ++ struct cond_av_list *tail; ++}; ++ ++struct selinux_kernel_status { ++ u32 version; ++ u32 sequence; ++ u32 enforcing; ++ u32 policyload; ++ u32 deny_unknown; ++}; ++ ++struct xfrm_offload { ++ struct { ++ __u32 low; ++ __u32 hi; ++ } seq; ++ __u32 flags; ++ __u32 status; ++ __u8 proto; ++}; ++ ++struct sec_path { ++ refcount_t refcnt; ++ int len; ++ int olen; ++ struct xfrm_state *xvec[6]; ++ struct xfrm_offload ovec[1]; ++}; ++ ++typedef union { ++ __be32 a4; ++ __be32 a6[4]; ++ struct in6_addr in6; ++} xfrm_address_t; ++ ++struct xfrm_id { ++ xfrm_address_t daddr; ++ __be32 spi; ++ __u8 proto; ++}; ++ ++struct xfrm_selector { ++ xfrm_address_t daddr; ++ xfrm_address_t saddr; ++ __be16 dport; ++ __be16 dport_mask; ++ __be16 sport; ++ __be16 sport_mask; ++ __u16 family; ++ __u8 prefixlen_d; ++ __u8 prefixlen_s; ++ __u8 proto; ++ int ifindex; ++ __kernel_uid32_t user; ++}; ++ ++struct xfrm_lifetime_cfg { ++ __u64 soft_byte_limit; ++ __u64 hard_byte_limit; ++ __u64 soft_packet_limit; ++ __u64 hard_packet_limit; ++ __u64 soft_add_expires_seconds; ++ __u64 hard_add_expires_seconds; ++ __u64 soft_use_expires_seconds; ++ __u64 hard_use_expires_seconds; ++}; ++ ++struct xfrm_lifetime_cur { ++ __u64 bytes; ++ __u64 packets; ++ __u64 add_time; ++ __u64 use_time; ++}; ++ ++struct xfrm_replay_state { ++ __u32 oseq; ++ __u32 seq; ++ __u32 bitmap; ++}; ++ ++struct xfrm_replay_state_esn { ++ unsigned int bmp_len; ++ __u32 oseq; ++ __u32 seq; ++ __u32 oseq_hi; ++ __u32 seq_hi; ++ __u32 replay_window; ++ __u32 bmp[0]; ++}; ++ ++struct xfrm_algo { ++ char alg_name[64]; ++ unsigned int alg_key_len; ++ char alg_key[0]; ++}; ++ ++struct xfrm_algo_auth { ++ char alg_name[64]; ++ unsigned int alg_key_len; ++ unsigned int alg_trunc_len; ++ char alg_key[0]; ++}; ++ ++struct xfrm_algo_aead { ++ char alg_name[64]; ++ unsigned int alg_key_len; ++ unsigned int alg_icv_len; ++ char alg_key[0]; ++}; ++ ++struct xfrm_stats { ++ __u32 replay_window; ++ __u32 replay; ++ __u32 integrity_failed; ++}; ++ ++enum { ++ XFRM_POLICY_TYPE_MAIN = 0, ++ XFRM_POLICY_TYPE_SUB = 1, ++ XFRM_POLICY_TYPE_MAX = 2, ++ XFRM_POLICY_TYPE_ANY = 255, ++}; ++ ++struct xfrm_encap_tmpl { ++ __u16 encap_type; ++ __be16 encap_sport; ++ __be16 encap_dport; ++ xfrm_address_t encap_oa; ++}; ++ ++struct xfrm_mark { ++ __u32 v; ++ __u32 m; ++}; ++ ++struct xfrm_address_filter { ++ xfrm_address_t saddr; ++ xfrm_address_t daddr; ++ __u16 family; ++ __u8 splen; ++ __u8 dplen; ++}; ++ ++struct xfrm_state_walk { ++ struct list_head all; ++ u8 state; ++ u8 dying; ++ u8 proto; ++ u32 seq; ++ struct 
xfrm_address_filter *filter; ++ long unsigned int kabi_reserved1; ++}; ++ ++struct xfrm_state_offload { ++ struct net_device *dev; ++ long unsigned int offload_handle; ++ unsigned int num_exthdrs; ++ u8 flags; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct xfrm_replay; ++ ++struct xfrm_type; ++ ++struct xfrm_mode; ++ ++struct xfrm_type_offload; ++ ++struct xfrm_state { ++ possible_net_t xs_net; ++ union { ++ struct hlist_node gclist; ++ struct hlist_node bydst; ++ }; ++ struct hlist_node bysrc; ++ struct hlist_node byspi; ++ refcount_t refcnt; ++ spinlock_t lock; ++ struct xfrm_id id; ++ struct xfrm_selector sel; ++ struct xfrm_mark mark; ++ u32 if_id; ++ u32 tfcpad; ++ u32 genid; ++ struct xfrm_state_walk km; ++ struct { ++ u32 reqid; ++ u8 mode; ++ u8 replay_window; ++ u8 aalgo; ++ u8 ealgo; ++ u8 calgo; ++ u8 flags; ++ u16 family; ++ xfrm_address_t saddr; ++ int header_len; ++ int trailer_len; ++ u32 extra_flags; ++ struct xfrm_mark smark; ++ } props; ++ struct xfrm_lifetime_cfg lft; ++ struct xfrm_algo_auth *aalg; ++ struct xfrm_algo *ealg; ++ struct xfrm_algo *calg; ++ struct xfrm_algo_aead *aead; ++ const char *geniv; ++ struct xfrm_encap_tmpl *encap; ++ xfrm_address_t *coaddr; ++ struct xfrm_state *tunnel; ++ atomic_t tunnel_users; ++ struct xfrm_replay_state replay; ++ struct xfrm_replay_state_esn *replay_esn; ++ struct xfrm_replay_state preplay; ++ struct xfrm_replay_state_esn *preplay_esn; ++ const struct xfrm_replay *repl; ++ u32 xflags; ++ u32 replay_maxage; ++ u32 replay_maxdiff; ++ struct timer_list rtimer; ++ struct xfrm_stats stats; ++ struct xfrm_lifetime_cur curlft; ++ struct tasklet_hrtimer mtimer; ++ struct xfrm_state_offload xso; ++ long int saved_tmo; ++ time64_t lastused; ++ struct page_frag xfrag; ++ const struct xfrm_type *type; ++ struct xfrm_mode *inner_mode; ++ struct xfrm_mode *inner_mode_iaf; ++ struct xfrm_mode *outer_mode; ++ const struct xfrm_type_offload *type_offload; ++ struct xfrm_sec_ctx *security; ++ void *data; ++}; ++ ++struct xfrm_policy_walk_entry { ++ struct list_head all; ++ u8 dead; ++}; ++ ++struct xfrm_policy_queue { ++ struct sk_buff_head hold_queue; ++ struct timer_list hold_timer; ++ long unsigned int timeout; ++}; ++ ++struct xfrm_tmpl { ++ struct xfrm_id id; ++ xfrm_address_t saddr; ++ short unsigned int encap_family; ++ u32 reqid; ++ u8 mode; ++ u8 share; ++ u8 optional; ++ u8 allalgs; ++ u32 aalgos; ++ u32 ealgos; ++ u32 calgos; ++}; ++ ++struct xfrm_policy { ++ possible_net_t xp_net; ++ struct hlist_node bydst; ++ struct hlist_node byidx; ++ rwlock_t lock; ++ refcount_t refcnt; ++ struct timer_list timer; ++ atomic_t genid; ++ u32 priority; ++ u32 index; ++ u32 if_id; ++ struct xfrm_mark mark; ++ struct xfrm_selector selector; ++ struct xfrm_lifetime_cfg lft; ++ struct xfrm_lifetime_cur curlft; ++ struct xfrm_policy_walk_entry walk; ++ struct xfrm_policy_queue polq; ++ u8 type; ++ u8 action; ++ u8 flags; ++ u8 xfrm_nr; ++ u16 family; ++ struct xfrm_sec_ctx *security; ++ struct xfrm_tmpl xfrm_vec[6]; ++ struct callback_head rcu; ++}; ++ ++struct xfrm_replay { ++ void (*advance)(struct xfrm_state *, __be32); ++ int (*check)(struct xfrm_state *, struct sk_buff *, __be32); ++ int (*recheck)(struct xfrm_state *, struct sk_buff *, __be32); ++ void (*notify)(struct xfrm_state *, int); ++ int (*overflow)(struct xfrm_state *, struct sk_buff *); ++}; ++ ++struct xfrm_type { ++ char *description; ++ struct module___2 *owner; ++ 
u8 proto; ++ u8 flags; ++ int (*init_state)(struct xfrm_state *); ++ void (*destructor)(struct xfrm_state *); ++ int (*input)(struct xfrm_state *, struct sk_buff *); ++ int (*output)(struct xfrm_state *, struct sk_buff *); ++ int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); ++ int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); ++ u32 (*get_mtu)(struct xfrm_state *, int); ++}; ++ ++struct xfrm_state_afinfo; ++ ++struct xfrm_mode { ++ int (*input2)(struct xfrm_state *, struct sk_buff *); ++ int (*input)(struct xfrm_state *, struct sk_buff *); ++ int (*output2)(struct xfrm_state *, struct sk_buff *); ++ int (*output)(struct xfrm_state *, struct sk_buff *); ++ struct sk_buff * (*gso_segment)(struct xfrm_state *, struct sk_buff *, netdev_features_t); ++ void (*xmit)(struct xfrm_state *, struct sk_buff *); ++ struct xfrm_state_afinfo *afinfo; ++ struct module___2 *owner; ++ unsigned int encap; ++ int flags; ++}; ++ ++struct xfrm_type_offload { ++ char *description; ++ struct module___2 *owner; ++ u8 proto; ++ void (*encap)(struct xfrm_state *, struct sk_buff *); ++ int (*input_tail)(struct xfrm_state *, struct sk_buff *); ++ int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); ++}; ++ ++struct xfrm_dst { ++ union { ++ struct dst_entry dst; ++ struct rtable rt; ++ struct rt6_info rt6; ++ } u; ++ struct dst_entry *route; ++ struct dst_entry *child; ++ struct dst_entry *path; ++ struct xfrm_policy *pols[2]; ++ int num_pols; ++ int num_xfrms; ++ u32 xfrm_genid; ++ u32 policy_genid; ++ u32 route_mtu_cached; ++ u32 child_mtu_cached; ++ u32 route_cookie; ++ u32 path_cookie; ++}; ++ ++struct xfrm_state_afinfo { ++ unsigned int family; ++ unsigned int proto; ++ __be16 eth_proto; ++ struct module___2 *owner; ++ const struct xfrm_type *type_map[256]; ++ const struct xfrm_type_offload *type_offload_map[256]; ++ struct xfrm_mode *mode_map[5]; ++ int (*init_flags)(struct xfrm_state *); ++ void (*init_tempsel)(struct xfrm_selector *, const struct flowi *); ++ void (*init_temprop)(struct xfrm_state *, const struct xfrm_tmpl *, const xfrm_address_t *, const xfrm_address_t *); ++ int (*tmpl_sort)(struct xfrm_tmpl **, struct xfrm_tmpl **, int); ++ int (*state_sort)(struct xfrm_state **, struct xfrm_state **, int); ++ int (*output)(struct net *, struct sock *, struct sk_buff *); ++ int (*output_finish)(struct sock *, struct sk_buff *); ++ int (*extract_input)(struct xfrm_state *, struct sk_buff *); ++ int (*extract_output)(struct xfrm_state *, struct sk_buff *); ++ int (*transport_finish)(struct sk_buff *, int); ++ void (*local_error)(struct sk_buff *, u32); ++}; ++ ++struct sockaddr_un { ++ __kernel_sa_family_t sun_family; ++ char sun_path[108]; ++}; ++ ++struct unix_address { ++ refcount_t refcnt; ++ int len; ++ unsigned int hash; ++ struct sockaddr_un name[0]; ++}; ++ ++struct unix_sock { ++ struct sock sk; ++ struct unix_address *addr; ++ struct path path; ++ struct mutex iolock; ++ struct mutex bindlock; ++ struct sock *peer; ++ struct list_head link; ++ atomic_long_t inflight; ++ spinlock_t lock; ++ long unsigned int gc_flags; ++ struct socket_wq peer_wq; ++ wait_queue_entry_t peer_wake; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ptrace_relation { ++ struct task_struct___2 *tracer; ++ struct task_struct___2 *tracee; ++ bool invalid; ++ struct list_head node; ++ struct callback_head rcu; ++}; ++ ++struct access_report_info { ++ struct callback_head work; ++ const char *access; ++ struct task_struct___2 *target; ++ struct task_struct___2 
*agent; ++}; ++ ++enum devcg_behavior { ++ DEVCG_DEFAULT_NONE = 0, ++ DEVCG_DEFAULT_ALLOW = 1, ++ DEVCG_DEFAULT_DENY = 2, ++}; ++ ++struct dev_exception_item { ++ u32 major; ++ u32 minor; ++ short int type; ++ short int access; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct dev_cgroup { ++ struct cgroup_subsys_state___2 css; ++ struct list_head exceptions; ++ enum devcg_behavior behavior; ++}; ++ ++struct crypto_wait { ++ struct completion completion; ++ int err; ++}; ++ ++struct crypto_template; ++ ++struct crypto_instance { ++ struct crypto_alg alg; ++ struct crypto_template *tmpl; ++ struct hlist_node list; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct rtattr; ++ ++struct crypto_template { ++ struct list_head list; ++ struct hlist_head instances; ++ struct module___2 *module; ++ struct crypto_instance * (*alloc)(struct rtattr **); ++ void (*free)(struct crypto_instance *); ++ int (*create)(struct crypto_template *, struct rtattr **); ++ char name[128]; ++}; ++ ++enum { ++ CRYPTO_MSG_ALG_REQUEST = 0, ++ CRYPTO_MSG_ALG_REGISTER = 1, ++}; ++ ++struct crypto_larval { ++ struct crypto_alg alg; ++ struct crypto_alg *adult; ++ struct completion completion; ++ u32 mask; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++enum { ++ CRYPTOA_UNSPEC = 0, ++ CRYPTOA_ALG = 1, ++ CRYPTOA_TYPE = 2, ++ CRYPTOA_U32 = 3, ++ __CRYPTOA_MAX = 4, ++}; ++ ++struct crypto_attr_alg { ++ char name[128]; ++}; ++ ++struct crypto_attr_type { ++ u32 type; ++ u32 mask; ++}; ++ ++struct crypto_attr_u32 { ++ u32 num; ++}; ++ ++struct rtattr { ++ short unsigned int rta_len; ++ short unsigned int rta_type; ++}; ++ ++struct crypto_spawn { ++ struct list_head list; ++ struct crypto_alg *alg; ++ struct crypto_instance *inst; ++ const struct crypto_type *frontend; ++ u32 mask; ++}; ++ ++struct crypto_queue { ++ struct list_head list; ++ struct list_head *backlog; ++ unsigned int qlen; ++ unsigned int max_qlen; ++}; ++ ++struct scatter_walk { ++ struct scatterlist *sg; ++ unsigned int offset; ++}; ++ ++struct aead_instance { ++ void (*free)(struct aead_instance *); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ union { ++ struct { ++ char head[128]; ++ struct crypto_instance base; ++ } s; ++ struct aead_alg alg; ++ }; ++}; ++ ++struct crypto_aead_spawn { ++ struct crypto_spawn base; ++}; ++ ++struct crypto_skcipher; ++ ++struct aead_geniv_ctx { ++ spinlock_t lock; ++ struct crypto_aead *child; ++ struct crypto_skcipher *sknull; ++ u8 salt[0]; ++}; ++ ++struct crypto_rng; ++ ++struct rng_alg { ++ int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); ++ int (*seed)(struct crypto_rng *, const u8 *, unsigned int); ++ void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); ++ unsigned int seedsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct crypto_rng { ++ struct crypto_tfm base; ++}; ++ ++enum crypto_attr_type_t { ++ CRYPTOCFGA_UNSPEC = 0, ++ CRYPTOCFGA_PRIORITY_VAL = 1, ++ CRYPTOCFGA_REPORT_LARVAL 
= 2, ++ CRYPTOCFGA_REPORT_HASH = 3, ++ CRYPTOCFGA_REPORT_BLKCIPHER = 4, ++ CRYPTOCFGA_REPORT_AEAD = 5, ++ CRYPTOCFGA_REPORT_COMPRESS = 6, ++ CRYPTOCFGA_REPORT_RNG = 7, ++ CRYPTOCFGA_REPORT_CIPHER = 8, ++ CRYPTOCFGA_REPORT_AKCIPHER = 9, ++ CRYPTOCFGA_REPORT_KPP = 10, ++ CRYPTOCFGA_REPORT_ACOMP = 11, ++ __CRYPTOCFGA_MAX = 12, ++}; ++ ++struct crypto_report_aead { ++ char type[64]; ++ char geniv[64]; ++ unsigned int blocksize; ++ unsigned int maxauthsize; ++ unsigned int ivsize; ++}; ++ ++struct skcipher_givcrypt_request { ++ u64 seq; ++ u8 *giv; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct ablkcipher_request creq; ++}; ++ ++struct ablkcipher_walk { ++ struct { ++ struct page___2 *page; ++ unsigned int offset; ++ } src; ++ struct { ++ struct page___2 *page; ++ unsigned int offset; ++ } dst; ++ struct scatter_walk in; ++ unsigned int nbytes; ++ struct scatter_walk out; ++ unsigned int total; ++ struct list_head buffers; ++ u8 *iv_buffer; ++ u8 *iv; ++ int flags; ++ unsigned int blocksize; ++}; ++ ++struct crypto_report_blkcipher { ++ char type[64]; ++ char geniv[64]; ++ unsigned int blocksize; ++ unsigned int min_keysize; ++ unsigned int max_keysize; ++ unsigned int ivsize; ++}; ++ ++struct ablkcipher_buffer { ++ struct list_head entry; ++ struct scatter_walk dst; ++ unsigned int len; ++ void *data; ++}; ++ ++enum { ++ ABLKCIPHER_WALK_SLOW = 1, ++}; ++ ++struct blkcipher_walk { ++ union { ++ struct { ++ struct page___2 *page; ++ long unsigned int offset; ++ } phys; ++ struct { ++ u8 *page; ++ u8 *addr; ++ } virt; ++ } src; ++ union { ++ struct { ++ struct page___2 *page; ++ long unsigned int offset; ++ } phys; ++ struct { ++ u8 *page; ++ u8 *addr; ++ } virt; ++ } dst; ++ struct scatter_walk in; ++ unsigned int nbytes; ++ struct scatter_walk out; ++ unsigned int total; ++ void *page; ++ u8 *buffer; ++ u8 *iv; ++ unsigned int ivsize; ++ int flags; ++ unsigned int walk_blocksize; ++ unsigned int cipher_blocksize; ++ unsigned int alignmask; ++}; ++ ++enum { ++ BLKCIPHER_WALK_PHYS = 1, ++ BLKCIPHER_WALK_SLOW = 2, ++ BLKCIPHER_WALK_COPY = 4, ++ BLKCIPHER_WALK_DIFF = 8, ++}; ++ ++struct skcipher_request { ++ unsigned int cryptlen; ++ u8 *iv; ++ struct scatterlist *src; ++ struct scatterlist *dst; ++ struct crypto_async_request base; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_skcipher { ++ int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); ++ int (*encrypt)(struct skcipher_request *); ++ int (*decrypt)(struct skcipher_request *); ++ unsigned int ivsize; ++ unsigned int reqsize; ++ unsigned int keysize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_tfm base; ++}; ++ ++struct skcipher_alg { ++ int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); ++ int (*encrypt)(struct skcipher_request *); ++ int (*decrypt)(struct skcipher_request *); ++ int (*init)(struct crypto_skcipher *); ++ void (*exit)(struct crypto_skcipher *); ++ unsigned int min_keysize; ++ unsigned int max_keysize; ++ unsigned int ivsize; ++ unsigned int chunksize; ++ unsigned int walksize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct skcipher_instance { 
++ void (*free)(struct skcipher_instance *); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ union { ++ struct { ++ char head[128]; ++ struct crypto_instance base; ++ } s; ++ struct skcipher_alg alg; ++ }; ++}; ++ ++struct crypto_skcipher_spawn { ++ struct crypto_spawn base; ++}; ++ ++struct skcipher_walk { ++ union { ++ struct { ++ struct page___2 *page; ++ long unsigned int offset; ++ } phys; ++ struct { ++ u8 *page; ++ void *addr; ++ } virt; ++ } src; ++ union { ++ struct { ++ struct page___2 *page; ++ long unsigned int offset; ++ } phys; ++ struct { ++ u8 *page; ++ void *addr; ++ } virt; ++ } dst; ++ struct scatter_walk in; ++ unsigned int nbytes; ++ struct scatter_walk out; ++ unsigned int total; ++ struct list_head buffers; ++ u8 *page; ++ u8 *buffer; ++ u8 *oiv; ++ void *iv; ++ unsigned int ivsize; ++ int flags; ++ unsigned int blocksize; ++ unsigned int stride; ++ unsigned int alignmask; ++}; ++ ++enum { ++ SKCIPHER_WALK_PHYS = 1, ++ SKCIPHER_WALK_SLOW = 2, ++ SKCIPHER_WALK_COPY = 4, ++ SKCIPHER_WALK_DIFF = 8, ++ SKCIPHER_WALK_SLEEP = 16, ++}; ++ ++struct skcipher_walk_buffer { ++ struct list_head entry; ++ struct scatter_walk dst; ++ unsigned int len; ++ u8 *data; ++ u8 buffer[0]; ++}; ++ ++struct hash_alg_common { ++ unsigned int digestsize; ++ unsigned int statesize; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct ahash_request { ++ struct crypto_async_request base; ++ unsigned int nbytes; ++ struct scatterlist *src; ++ u8 *result; ++ void *priv; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_ahash; ++ ++struct ahash_alg { ++ int (*init)(struct ahash_request *); ++ int (*update)(struct ahash_request *); ++ int (*final)(struct ahash_request *); ++ int (*finup)(struct ahash_request *); ++ int (*digest)(struct ahash_request *); ++ int (*export)(struct ahash_request *, void *); ++ int (*import)(struct ahash_request *, const void *); ++ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct hash_alg_common halg; ++}; ++ ++struct crypto_ahash { ++ int (*init)(struct ahash_request *); ++ int (*update)(struct ahash_request *); ++ int (*final)(struct ahash_request *); ++ int (*finup)(struct ahash_request *); ++ int (*digest)(struct ahash_request *); ++ int (*export)(struct ahash_request *, void *); ++ int (*import)(struct ahash_request *, const void *); ++ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); ++ unsigned int reqsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_tfm base; ++}; ++ ++struct crypto_shash___2; ++ ++struct shash_desc { ++ struct crypto_shash___2 *tfm; ++ u32 flags; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_shash___2 { ++ unsigned int descsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 
64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_tfm base; ++}; ++ ++struct shash_alg { ++ int (*init)(struct shash_desc *); ++ int (*update)(struct shash_desc *, const u8 *, unsigned int); ++ int (*final)(struct shash_desc *, u8 *); ++ int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); ++ int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); ++ int (*export)(struct shash_desc *, void *); ++ int (*import)(struct shash_desc *, const void *); ++ int (*setkey)(struct crypto_shash___2 *, const u8 *, unsigned int); ++ unsigned int descsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ unsigned int digestsize; ++ unsigned int statesize; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct crypto_hash_walk { ++ char *data; ++ unsigned int offset; ++ unsigned int alignmask; ++ struct page___2 *pg; ++ unsigned int entrylen; ++ unsigned int total; ++ struct scatterlist *sg; ++ unsigned int flags; ++}; ++ ++struct ahash_instance { ++ struct ahash_alg alg; ++}; ++ ++struct crypto_ahash_spawn { ++ struct crypto_spawn base; ++}; ++ ++struct crypto_report_hash { ++ char type[64]; ++ unsigned int blocksize; ++ unsigned int digestsize; ++}; ++ ++struct ahash_request_priv { ++ crypto_completion_t complete; ++ void *data; ++ u8 *result; ++ u32 flags; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *ubuf[0]; ++}; ++ ++struct shash_instance { ++ struct shash_alg alg; ++}; ++ ++struct crypto_shash_spawn { ++ struct crypto_spawn base; ++}; ++ ++struct crypto_report_akcipher { ++ char type[64]; ++}; ++ ++struct akcipher_request { ++ struct crypto_async_request base; ++ struct scatterlist *src; ++ struct scatterlist *dst; ++ unsigned int src_len; ++ unsigned int dst_len; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_akcipher { ++ struct crypto_tfm base; ++}; ++ ++struct akcipher_alg { ++ int (*sign)(struct akcipher_request *); ++ int (*verify)(struct akcipher_request *); ++ int (*encrypt)(struct akcipher_request *); ++ int (*decrypt)(struct akcipher_request *); ++ int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); ++ int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); ++ unsigned int (*max_size)(struct crypto_akcipher *); ++ int (*init)(struct crypto_akcipher *); ++ void (*exit)(struct crypto_akcipher *); ++ unsigned int reqsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct akcipher_instance { ++ void (*free)(struct akcipher_instance *); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ union { ++ struct { ++ char head[128]; ++ struct crypto_instance base; ++ } s; ++ struct akcipher_alg alg; ++ }; ++}; ++ ++struct crypto_akcipher_spawn { ++ struct crypto_spawn base; ++}; ++ ++struct crypto_report_kpp { ++ char type[64]; ++}; ++ ++struct kpp_request { ++ struct crypto_async_request base; 
++ struct scatterlist *src; ++ struct scatterlist *dst; ++ unsigned int src_len; ++ unsigned int dst_len; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_kpp { ++ struct crypto_tfm base; ++}; ++ ++struct kpp_alg { ++ int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); ++ int (*generate_public_key)(struct kpp_request *); ++ int (*compute_shared_secret)(struct kpp_request *); ++ unsigned int (*max_size)(struct crypto_kpp *); ++ int (*init)(struct crypto_kpp *); ++ void (*exit)(struct crypto_kpp *); ++ unsigned int reqsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++enum asn1_class { ++ ASN1_UNIV = 0, ++ ASN1_APPL = 1, ++ ASN1_CONT = 2, ++ ASN1_PRIV = 3, ++}; ++ ++enum asn1_method { ++ ASN1_PRIM = 0, ++ ASN1_CONS = 1, ++}; ++ ++enum asn1_tag { ++ ASN1_EOC = 0, ++ ASN1_BOOL = 1, ++ ASN1_INT = 2, ++ ASN1_BTS = 3, ++ ASN1_OTS = 4, ++ ASN1_NULL = 5, ++ ASN1_OID = 6, ++ ASN1_ODE = 7, ++ ASN1_EXT = 8, ++ ASN1_REAL = 9, ++ ASN1_ENUM = 10, ++ ASN1_EPDV = 11, ++ ASN1_UTF8STR = 12, ++ ASN1_RELOID = 13, ++ ASN1_SEQ = 16, ++ ASN1_SET = 17, ++ ASN1_NUMSTR = 18, ++ ASN1_PRNSTR = 19, ++ ASN1_TEXSTR = 20, ++ ASN1_VIDSTR = 21, ++ ASN1_IA5STR = 22, ++ ASN1_UNITIM = 23, ++ ASN1_GENTIM = 24, ++ ASN1_GRASTR = 25, ++ ASN1_VISSTR = 26, ++ ASN1_GENSTR = 27, ++ ASN1_UNISTR = 28, ++ ASN1_CHRSTR = 29, ++ ASN1_BMPSTR = 30, ++ ASN1_LONG_TAG = 31, ++}; ++ ++typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); ++ ++struct asn1_decoder { ++ const unsigned char *machine; ++ size_t machlen; ++ const asn1_action_t *actions; ++}; ++ ++enum asn1_opcode { ++ ASN1_OP_MATCH = 0, ++ ASN1_OP_MATCH_OR_SKIP = 1, ++ ASN1_OP_MATCH_ACT = 2, ++ ASN1_OP_MATCH_ACT_OR_SKIP = 3, ++ ASN1_OP_MATCH_JUMP = 4, ++ ASN1_OP_MATCH_JUMP_OR_SKIP = 5, ++ ASN1_OP_MATCH_ANY = 8, ++ ASN1_OP_MATCH_ANY_OR_SKIP = 9, ++ ASN1_OP_MATCH_ANY_ACT = 10, ++ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, ++ ASN1_OP_COND_MATCH_OR_SKIP = 17, ++ ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, ++ ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, ++ ASN1_OP_COND_MATCH_ANY = 24, ++ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, ++ ASN1_OP_COND_MATCH_ANY_ACT = 26, ++ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, ++ ASN1_OP_COND_FAIL = 28, ++ ASN1_OP_COMPLETE = 29, ++ ASN1_OP_ACT = 30, ++ ASN1_OP_MAYBE_ACT = 31, ++ ASN1_OP_END_SEQ = 32, ++ ASN1_OP_END_SET = 33, ++ ASN1_OP_END_SEQ_OF = 34, ++ ASN1_OP_END_SET_OF = 35, ++ ASN1_OP_END_SEQ_ACT = 36, ++ ASN1_OP_END_SET_ACT = 37, ++ ASN1_OP_END_SEQ_OF_ACT = 38, ++ ASN1_OP_END_SET_OF_ACT = 39, ++ ASN1_OP_RETURN = 40, ++ ASN1_OP__NR = 41, ++}; ++ ++enum rsapubkey_actions { ++ ACT_rsa_get_e = 0, ++ ACT_rsa_get_n = 1, ++ NR__rsapubkey_actions = 2, ++}; ++ ++enum rsaprivkey_actions { ++ ACT_rsa_get_d = 0, ++ ACT_rsa_get_dp = 1, ++ ACT_rsa_get_dq = 2, ++ ACT_rsa_get_e___2 = 3, ++ ACT_rsa_get_n___2 = 4, ++ ACT_rsa_get_p = 5, ++ ACT_rsa_get_q = 6, ++ ACT_rsa_get_qinv = 7, ++ NR__rsaprivkey_actions = 8, ++}; ++ ++typedef long unsigned int mpi_limb_t; ++ ++struct gcry_mpi { ++ int alloced; ++ int nlimbs; ++ int nbits; ++ int sign; ++ unsigned int flags; ++ mpi_limb_t *d; ++}; ++ ++typedef struct gcry_mpi *MPI; ++ ++struct rsa_key { ++ const u8 *n; ++ const u8 *e; ++ const u8 *d; ++ const u8 *p; ++ const u8 *q; ++ const u8 *dp; ++ const u8 *dq; ++ const u8 *qinv; ++ size_t n_sz; ++ size_t e_sz; ++ size_t d_sz; ++ size_t p_sz; ++ size_t q_sz; 
++ size_t dp_sz; ++ size_t dq_sz; ++ size_t qinv_sz; ++}; ++ ++struct rsa_mpi_key { ++ MPI n; ++ MPI e; ++ MPI d; ++}; ++ ++struct crypto_template___2; ++ ++struct asn1_decoder___2; ++ ++struct rsa_asn1_template { ++ const char *name; ++ const u8 *data; ++ size_t size; ++}; ++ ++struct pkcs1pad_ctx { ++ struct crypto_akcipher *child; ++ unsigned int key_size; ++}; ++ ++struct pkcs1pad_inst_ctx { ++ struct crypto_akcipher_spawn spawn; ++ const struct rsa_asn1_template *digest_info; ++}; ++ ++struct pkcs1pad_request { ++ struct scatterlist in_sg[2]; ++ struct scatterlist out_sg[1]; ++ uint8_t *in_buf; ++ uint8_t *out_buf; ++ long: 64; ++ long: 64; ++ struct akcipher_request child_req; ++}; ++ ++struct crypto_report_acomp { ++ char type[64]; ++}; ++ ++struct acomp_req { ++ struct crypto_async_request base; ++ struct scatterlist *src; ++ struct scatterlist *dst; ++ unsigned int slen; ++ unsigned int dlen; ++ u32 flags; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ void *__ctx[0]; ++}; ++ ++struct crypto_acomp { ++ int (*compress)(struct acomp_req *); ++ int (*decompress)(struct acomp_req *); ++ void (*dst_free)(struct scatterlist *); ++ unsigned int reqsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_tfm base; ++}; ++ ++struct acomp_alg { ++ int (*compress)(struct acomp_req *); ++ int (*decompress)(struct acomp_req *); ++ void (*dst_free)(struct scatterlist *); ++ int (*init)(struct crypto_acomp *); ++ void (*exit)(struct crypto_acomp *); ++ unsigned int reqsize; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct crypto_report_comp { ++ char type[64]; ++}; ++ ++struct crypto_scomp { ++ struct crypto_tfm base; ++}; ++ ++struct scomp_alg { ++ void * (*alloc_ctx)(struct crypto_scomp *); ++ void (*free_ctx)(struct crypto_scomp *, void *); ++ int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); ++ int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct crypto_alg base; ++}; ++ ++struct cryptomgr_param { ++ struct rtattr *tb[34]; ++ struct { ++ struct rtattr attr; ++ struct crypto_attr_type data; ++ } type; ++ union { ++ struct rtattr attr; ++ struct { ++ struct rtattr attr; ++ struct crypto_attr_alg data; ++ } alg; ++ struct { ++ struct rtattr attr; ++ struct crypto_attr_u32 data; ++ } nu32; ++ } attrs[32]; ++ char template[128]; ++ struct crypto_larval *larval; ++ u32 otype; ++ u32 omask; ++}; ++ ++struct crypto_test_param { ++ char driver[128]; ++ char alg[128]; ++ u32 type; ++}; ++ ++struct crypto_cipher { ++ struct crypto_tfm base; ++}; ++ ++struct drbg_string { ++ const unsigned char *buf; ++ size_t len; ++ struct list_head list; ++}; ++ ++struct drbg_test_data { ++ struct drbg_string *testentropy; ++}; ++ ++struct hash_testvec { ++ const char *key; ++ const char *plaintext; ++ const char *digest; ++ unsigned char tap[8]; ++ short unsigned int psize; ++ unsigned char np; ++ unsigned char ksize; ++}; ++ ++struct cipher_testvec { ++ const char *key; ++ const char *iv; ++ const char *ptext; ++ const char *ctext; ++ short unsigned 
int tap[8]; ++ int np; ++ unsigned char also_non_np; ++ bool fail; ++ unsigned char wk; ++ unsigned char klen; ++ short unsigned int len; ++ bool fips_skip; ++ bool generates_iv; ++}; ++ ++struct aead_testvec { ++ const char *key; ++ const char *iv; ++ const char *input; ++ const char *assoc; ++ const char *result; ++ unsigned char tap[8]; ++ unsigned char atap[8]; ++ int np; ++ int anp; ++ bool fail; ++ unsigned char novrfy; ++ unsigned char wk; ++ unsigned char klen; ++ short unsigned int ilen; ++ short unsigned int alen; ++ short unsigned int rlen; ++}; ++ ++struct cprng_testvec { ++ const char *key; ++ const char *dt; ++ const char *v; ++ const char *result; ++ unsigned char klen; ++ short unsigned int dtlen; ++ short unsigned int vlen; ++ short unsigned int rlen; ++ short unsigned int loops; ++}; ++ ++struct drbg_testvec { ++ const unsigned char *entropy; ++ size_t entropylen; ++ const unsigned char *entpra; ++ const unsigned char *entprb; ++ size_t entprlen; ++ const unsigned char *addtla; ++ const unsigned char *addtlb; ++ size_t addtllen; ++ const unsigned char *pers; ++ size_t perslen; ++ const unsigned char *expected; ++ size_t expectedlen; ++}; ++ ++struct akcipher_testvec { ++ const unsigned char *key; ++ const unsigned char *m; ++ const unsigned char *c; ++ unsigned int key_len; ++ unsigned int m_size; ++ unsigned int c_size; ++ bool public_key_vec; ++ bool siggen_sigver_test; ++}; ++ ++struct kpp_testvec { ++ const unsigned char *secret; ++ const unsigned char *b_secret; ++ const unsigned char *b_public; ++ const unsigned char *expected_a_public; ++ const unsigned char *expected_ss; ++ short unsigned int secret_size; ++ short unsigned int b_secret_size; ++ short unsigned int b_public_size; ++ short unsigned int expected_a_public_size; ++ short unsigned int expected_ss_size; ++ bool genkey; ++}; ++ ++struct comp_testvec { ++ int inlen; ++ int outlen; ++ char input[512]; ++ char output[512]; ++}; ++ ++struct aead_test_suite { ++ struct { ++ const struct aead_testvec *vecs; ++ unsigned int count; ++ } enc; ++ struct { ++ const struct aead_testvec *vecs; ++ unsigned int count; ++ } dec; ++}; ++ ++struct cipher_test_suite { ++ const struct cipher_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct comp_test_suite { ++ struct { ++ const struct comp_testvec *vecs; ++ unsigned int count; ++ } comp; ++ struct { ++ const struct comp_testvec *vecs; ++ unsigned int count; ++ } decomp; ++}; ++ ++struct hash_test_suite { ++ const struct hash_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct cprng_test_suite { ++ const struct cprng_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct drbg_test_suite { ++ const struct drbg_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct akcipher_test_suite { ++ const struct akcipher_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct kpp_test_suite { ++ const struct kpp_testvec *vecs; ++ unsigned int count; ++}; ++ ++struct alg_test_desc { ++ const char *alg; ++ int (*test)(const struct alg_test_desc *, const char *, u32, u32); ++ int fips_allowed; ++ union { ++ struct aead_test_suite aead; ++ struct cipher_test_suite cipher; ++ struct comp_test_suite comp; ++ struct hash_test_suite hash; ++ struct cprng_test_suite cprng; ++ struct drbg_test_suite drbg; ++ struct akcipher_test_suite akcipher; ++ struct kpp_test_suite kpp; ++ } suite; ++}; ++ ++enum hash_test { ++ HASH_TEST_DIGEST = 0, ++ HASH_TEST_FINAL = 1, ++ HASH_TEST_FINUP = 2, ++}; ++ ++struct hmac_ctx { ++ struct crypto_shash___2 *hash; ++}; ++ ++struct md5_state { ++ u32 hash[4]; 
++ u32 block[16]; ++ u64 byte_count; ++}; ++ ++struct sha1_state { ++ u32 state[5]; ++ u64 count; ++ u8 buffer[64]; ++}; ++ ++typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); ++ ++struct sha256_state { ++ u32 state[8]; ++ u64 count; ++ u8 buf[64]; ++}; ++ ++typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); ++ ++typedef struct { ++ u64 a; ++ u64 b; ++} u128; ++ ++typedef struct { ++ __be64 a; ++ __be64 b; ++} be128; ++ ++typedef struct { ++ __le64 b; ++ __le64 a; ++} le128; ++ ++struct gf128mul_4k { ++ be128 t[256]; ++}; ++ ++struct gf128mul_64k { ++ struct gf128mul_4k *t[16]; ++}; ++ ++struct crypto_ecb_ctx { ++ struct crypto_cipher *child; ++}; ++ ++struct crypto_cbc_ctx { ++ struct crypto_cipher *child; ++}; ++ ++struct crypto_ctr_ctx { ++ struct crypto_cipher *child; ++}; ++ ++struct crypto_rfc3686_ctx { ++ struct crypto_skcipher *child; ++ u8 nonce[4]; ++}; ++ ++struct crypto_rfc3686_req_ctx { ++ u8 iv[16]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct skcipher_request subreq; ++}; ++ ++struct gcm_instance_ctx { ++ struct crypto_skcipher_spawn ctr; ++ struct crypto_ahash_spawn ghash; ++}; ++ ++struct crypto_gcm_ctx { ++ struct crypto_skcipher *ctr; ++ struct crypto_ahash *ghash; ++}; ++ ++struct crypto_rfc4106_ctx { ++ struct crypto_aead *child; ++ u8 nonce[4]; ++}; ++ ++struct crypto_rfc4106_req_ctx { ++ struct scatterlist src[3]; ++ struct scatterlist dst[3]; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct aead_request subreq; ++}; ++ ++struct crypto_rfc4543_instance_ctx { ++ struct crypto_aead_spawn aead; ++}; ++ ++struct crypto_rfc4543_ctx { ++ struct crypto_aead *child; ++ struct crypto_skcipher *null; ++ u8 nonce[4]; ++}; ++ ++struct crypto_rfc4543_req_ctx { ++ struct aead_request subreq; ++}; ++ ++struct crypto_gcm_ghash_ctx { ++ unsigned int cryptlen; ++ struct scatterlist *src; ++ int (*complete)(struct aead_request *, u32); ++}; ++ ++struct crypto_gcm_req_priv_ctx { ++ u8 iv[16]; ++ u8 auth_tag[16]; ++ u8 iauth_tag[16]; ++ struct scatterlist src[3]; ++ struct scatterlist dst[3]; ++ struct scatterlist sg; ++ struct crypto_gcm_ghash_ctx ghash_ctx; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ union { ++ struct ahash_request ahreq; ++ struct skcipher_request skreq; ++ } u; ++}; ++ ++typedef unsigned char Byte; ++ ++typedef long unsigned int uLong; ++ ++struct internal_state; ++ ++struct z_stream_s { ++ const Byte *next_in; ++ uLong avail_in; ++ uLong total_in; ++ Byte *next_out; ++ uLong avail_out; ++ uLong total_out; ++ char *msg; ++ struct internal_state *state; ++ void *workspace; ++ int data_type; ++ uLong adler; ++ uLong reserved; ++}; ++ ++struct internal_state { ++ int dummy; ++}; ++ ++struct deflate_ctx { ++ struct z_stream_s comp_stream; ++ struct z_stream_s decomp_stream; ++}; ++ ++struct chksum_ctx { ++ u32 key; ++}; ++ ++struct chksum_desc_ctx { ++ u32 crc; ++}; ++ ++struct chksum_desc_ctx___2 { ++ __u16 crc; ++}; ++ ++struct lzo_ctx { ++ void *lzo_comp_mem; ++}; ++ ++struct crypto_report_rng { ++ char type[64]; ++ unsigned int seedsize; ++}; ++ ++struct random_ready_callback { ++ struct list_head list; ++ void (*func)(struct random_ready_callback *); ++ struct module___2 *owner; ++}; ++ ++typedef uint32_t drbg_flag_t; ++ 
++struct drbg_core { ++ drbg_flag_t flags; ++ __u8 statelen; ++ __u8 blocklen_bytes; ++ char cra_name[128]; ++ char backend_cra_name[128]; ++}; ++ ++struct drbg_state; ++ ++struct drbg_state_ops { ++ int (*update)(struct drbg_state *, struct list_head *, int); ++ int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); ++ int (*crypto_init)(struct drbg_state *); ++ int (*crypto_fini)(struct drbg_state *); ++}; ++ ++struct drbg_state { ++ struct mutex drbg_mutex; ++ unsigned char *V; ++ unsigned char *Vbuf; ++ unsigned char *C; ++ unsigned char *Cbuf; ++ size_t reseed_ctr; ++ size_t reseed_threshold; ++ unsigned char *scratchpad; ++ unsigned char *scratchpadbuf; ++ void *priv_data; ++ struct crypto_skcipher *ctr_handle; ++ struct skcipher_request *ctr_req; ++ __u8 *outscratchpadbuf; ++ __u8 *outscratchpad; ++ struct crypto_wait ctr_wait; ++ struct scatterlist sg_in; ++ struct scatterlist sg_out; ++ bool seeded; ++ bool pr; ++ struct work_struct seed_work; ++ struct crypto_rng *jent; ++ const struct drbg_state_ops *d_ops; ++ const struct drbg_core *core; ++ struct drbg_string test_data; ++ struct random_ready_callback random_ready; ++}; ++ ++enum drbg_prefixes { ++ DRBG_PREFIX0 = 0, ++ DRBG_PREFIX1 = 1, ++ DRBG_PREFIX2 = 2, ++ DRBG_PREFIX3 = 3, ++}; ++ ++struct sdesc { ++ struct shash_desc shash; ++ char ctx[0]; ++}; ++ ++struct s { ++ __be32 conv; ++}; ++ ++struct rand_data { ++ __u64 data; ++ __u64 old_data; ++ __u64 prev_time; ++ __u64 last_delta; ++ __s64 last_delta2; ++ unsigned int stuck: 1; ++ unsigned int osr; ++ unsigned int stir: 1; ++ unsigned int disable_unbias: 1; ++ unsigned char *mem; ++ unsigned int memlocation; ++ unsigned int memblocks; ++ unsigned int memblocksize; ++ unsigned int memaccessloops; ++}; ++ ++union c { ++ __u64 u64; ++ __u32 u32[2]; ++}; ++ ++struct rand_data___2; ++ ++struct jitterentropy { ++ spinlock_t jent_lock; ++ struct rand_data___2 *entropy_collector; ++}; ++ ++struct ghash_ctx { ++ struct gf128mul_4k *gf128; ++}; ++ ++struct ghash_desc_ctx { ++ u8 buffer[16]; ++ u32 bytes; ++}; ++ ++struct sockaddr_alg { ++ __u16 salg_family; ++ __u8 salg_type[14]; ++ __u32 salg_feat; ++ __u32 salg_mask; ++ __u8 salg_name[64]; ++}; ++ ++struct af_alg_iv { ++ __u32 ivlen; ++ __u8 iv[0]; ++}; ++ ++struct cmsghdr { ++ __kernel_size_t cmsg_len; ++ int cmsg_level; ++ int cmsg_type; ++}; ++ ++struct net_proto_family { ++ int family; ++ int (*create)(struct net *, struct socket *, int, int); ++ struct module___2 *owner; ++}; ++ ++enum { ++ SOCK_WAKE_IO = 0, ++ SOCK_WAKE_WAITD = 1, ++ SOCK_WAKE_SPACE = 2, ++ SOCK_WAKE_URG = 3, ++}; ++ ++struct af_alg_type; ++ ++struct alg_sock { ++ struct sock sk; ++ struct sock *parent; ++ unsigned int refcnt; ++ unsigned int nokey_refcnt; ++ const struct af_alg_type *type; ++ void *private; ++}; ++ ++struct af_alg_type { ++ void * (*bind)(const char *, u32, u32); ++ void (*release)(void *); ++ int (*setkey)(void *, const u8 *, unsigned int); ++ int (*accept)(void *, struct sock *); ++ int (*accept_nokey)(void *, struct sock *); ++ int (*setauthsize)(void *, unsigned int); ++ struct proto_ops *ops; ++ struct proto_ops *ops_nokey; ++ struct module___2 *owner; ++ char name[14]; ++}; ++ ++struct af_alg_control { ++ struct af_alg_iv *iv; ++ int op; ++ unsigned int aead_assoclen; ++}; ++ ++struct af_alg_sgl { ++ struct scatterlist sg[17]; ++ struct page *pages[16]; ++ unsigned int npages; ++}; ++ ++struct af_alg_tsgl { ++ struct list_head list; ++ unsigned int cur; ++ struct scatterlist sg[0]; ++}; ++ ++struct 
af_alg_rsgl { ++ struct af_alg_sgl sgl; ++ struct list_head list; ++ size_t sg_num_bytes; ++}; ++ ++struct af_alg_async_req { ++ struct kiocb *iocb; ++ struct sock *sk; ++ struct af_alg_rsgl first_rsgl; ++ struct af_alg_rsgl *last_rsgl; ++ struct list_head rsgl_list; ++ struct scatterlist *tsgl; ++ unsigned int tsgl_entries; ++ unsigned int outlen; ++ unsigned int areqlen; ++ union { ++ struct aead_request aead_req; ++ struct skcipher_request skcipher_req; ++ } cra_u; ++}; ++ ++struct af_alg_ctx { ++ struct list_head tsgl_list; ++ void *iv; ++ size_t aead_assoclen; ++ struct crypto_wait wait; ++ size_t used; ++ atomic_t rcvused; ++ bool more; ++ bool merge; ++ bool enc; ++ unsigned int len; ++}; ++ ++struct alg_type_list { ++ const struct af_alg_type *type; ++ struct list_head list; ++}; ++ ++struct hash_ctx { ++ struct af_alg_sgl sgl; ++ u8 *result; ++ struct crypto_wait wait; ++ unsigned int len; ++ bool more; ++ long: 24; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct ahash_request req; ++}; ++ ++struct rng_ctx { ++ unsigned int len; ++ struct crypto_rng *drng; ++}; ++ ++struct aead_tfm { ++ struct crypto_aead *aead; ++ struct crypto_skcipher *null_tfm; ++}; ++ ++enum asymmetric_payload_bits { ++ asym_crypto = 0, ++ asym_subtype = 1, ++ asym_key_ids = 2, ++ asym_auth = 3, ++}; ++ ++struct asymmetric_key_id { ++ short unsigned int len; ++ unsigned char data[0]; ++}; ++ ++struct asymmetric_key_ids { ++ void *id[2]; ++}; ++ ++struct public_key_signature; ++ ++struct asymmetric_key_subtype___2 { ++ struct module___2 *owner; ++ const char *name; ++ short unsigned int name_len; ++ void (*describe)(const struct key___2 *, struct seq_file___2 *); ++ void (*destroy)(void *, void *); ++ int (*verify_signature)(const struct key___2 *, const struct public_key_signature *); ++}; ++ ++struct public_key_signature { ++ struct asymmetric_key_id *auth_ids[2]; ++ u8 *s; ++ u32 s_size; ++ u8 *digest; ++ u8 digest_size; ++ const char *pkey_algo; ++ const char *hash_algo; ++}; ++ ++struct asymmetric_key_parser { ++ struct list_head link; ++ struct module___2 *owner; ++ const char *name; ++ int (*parse)(struct key_preparsed_payload *); ++}; ++ ++struct public_key { ++ void *key; ++ u32 keylen; ++ const char *id_type; ++ const char *pkey_algo; ++}; ++ ++enum x509_actions { ++ ACT_x509_extract_key_data = 0, ++ ACT_x509_extract_name_segment = 1, ++ ACT_x509_note_OID = 2, ++ ACT_x509_note_issuer = 3, ++ ACT_x509_note_not_after = 4, ++ ACT_x509_note_not_before = 5, ++ ACT_x509_note_pkey_algo = 6, ++ ACT_x509_note_serial = 7, ++ ACT_x509_note_signature = 8, ++ ACT_x509_note_subject = 9, ++ ACT_x509_note_tbs_certificate = 10, ++ ACT_x509_process_extension = 11, ++ NR__x509_actions = 12, ++}; ++ ++enum x509_akid_actions { ++ ACT_x509_akid_note_kid = 0, ++ ACT_x509_akid_note_name = 1, ++ ACT_x509_akid_note_serial = 2, ++ ACT_x509_extract_name_segment___2 = 3, ++ ACT_x509_note_OID___2 = 4, ++ NR__x509_akid_actions = 5, ++}; ++ ++enum OID { ++ OID_id_dsa_with_sha1 = 0, ++ OID_id_dsa = 1, ++ OID_id_ecdsa_with_sha1 = 2, ++ OID_id_ecPublicKey = 3, ++ OID_rsaEncryption = 4, ++ OID_md2WithRSAEncryption = 5, ++ OID_md3WithRSAEncryption = 6, ++ OID_md4WithRSAEncryption = 7, ++ OID_sha1WithRSAEncryption = 8, ++ OID_sha256WithRSAEncryption = 9, ++ OID_sha384WithRSAEncryption = 10, ++ OID_sha512WithRSAEncryption = 11, ++ OID_sha224WithRSAEncryption = 12, ++ OID_data = 13, ++ OID_signed_data = 14, ++ OID_email_address = 15, ++ OID_contentType = 16, ++ OID_messageDigest = 17, ++ OID_signingTime = 18, ++ 
OID_smimeCapabilites = 19, ++ OID_smimeAuthenticatedAttrs = 20, ++ OID_md2 = 21, ++ OID_md4 = 22, ++ OID_md5 = 23, ++ OID_msIndirectData = 24, ++ OID_msStatementType = 25, ++ OID_msSpOpusInfo = 26, ++ OID_msPeImageDataObjId = 27, ++ OID_msIndividualSPKeyPurpose = 28, ++ OID_msOutlookExpress = 29, ++ OID_certAuthInfoAccess = 30, ++ OID_sha1 = 31, ++ OID_sha256 = 32, ++ OID_sha384 = 33, ++ OID_sha512 = 34, ++ OID_sha224 = 35, ++ OID_commonName = 36, ++ OID_surname = 37, ++ OID_countryName = 38, ++ OID_locality = 39, ++ OID_stateOrProvinceName = 40, ++ OID_organizationName = 41, ++ OID_organizationUnitName = 42, ++ OID_title = 43, ++ OID_description = 44, ++ OID_name = 45, ++ OID_givenName = 46, ++ OID_initials = 47, ++ OID_generationalQualifier = 48, ++ OID_subjectKeyIdentifier = 49, ++ OID_keyUsage = 50, ++ OID_subjectAltName = 51, ++ OID_issuerAltName = 52, ++ OID_basicConstraints = 53, ++ OID_crlDistributionPoints = 54, ++ OID_certPolicies = 55, ++ OID_authorityKeyIdentifier = 56, ++ OID_extKeyUsage = 57, ++ OID__NR = 58, ++}; ++ ++struct x509_certificate { ++ struct x509_certificate *next; ++ struct x509_certificate *signer; ++ struct public_key *pub; ++ struct public_key_signature *sig; ++ char *issuer; ++ char *subject; ++ struct asymmetric_key_id *id; ++ struct asymmetric_key_id *skid; ++ time64_t valid_from; ++ time64_t valid_to; ++ const void *tbs; ++ unsigned int tbs_size; ++ unsigned int raw_sig_size; ++ const void *raw_sig; ++ const void *raw_serial; ++ unsigned int raw_serial_size; ++ unsigned int raw_issuer_size; ++ const void *raw_issuer; ++ const void *raw_subject; ++ unsigned int raw_subject_size; ++ unsigned int raw_skid_size; ++ const void *raw_skid; ++ unsigned int index; ++ bool seen; ++ bool verified; ++ bool self_signed; ++ bool unsupported_key; ++ bool unsupported_sig; ++ bool blacklisted; ++}; ++ ++struct x509_parse_context { ++ struct x509_certificate *cert; ++ long unsigned int data; ++ const void *cert_start; ++ const void *key; ++ size_t key_size; ++ enum OID last_oid; ++ enum OID algo_oid; ++ unsigned char nr_mpi; ++ u8 o_size; ++ u8 cn_size; ++ u8 email_size; ++ u16 o_offset; ++ u16 cn_offset; ++ u16 email_offset; ++ unsigned int raw_akid_size; ++ const void *raw_akid; ++ const void *akid_raw_issuer; ++ unsigned int akid_raw_issuer_size; ++}; ++ ++enum pkcs7_actions { ++ ACT_pkcs7_check_content_type = 0, ++ ACT_pkcs7_extract_cert = 1, ++ ACT_pkcs7_note_OID = 2, ++ ACT_pkcs7_note_certificate_list = 3, ++ ACT_pkcs7_note_content = 4, ++ ACT_pkcs7_note_data = 5, ++ ACT_pkcs7_note_signed_info = 6, ++ ACT_pkcs7_note_signeddata_version = 7, ++ ACT_pkcs7_note_signerinfo_version = 8, ++ ACT_pkcs7_sig_note_authenticated_attr = 9, ++ ACT_pkcs7_sig_note_digest_algo = 10, ++ ACT_pkcs7_sig_note_issuer = 11, ++ ACT_pkcs7_sig_note_pkey_algo = 12, ++ ACT_pkcs7_sig_note_serial = 13, ++ ACT_pkcs7_sig_note_set_of_authattrs = 14, ++ ACT_pkcs7_sig_note_signature = 15, ++ ACT_pkcs7_sig_note_skid = 16, ++ NR__pkcs7_actions = 17, ++}; ++ ++struct pkcs7_signed_info { ++ struct pkcs7_signed_info *next; ++ struct x509_certificate *signer; ++ unsigned int index; ++ bool unsupported_crypto; ++ bool blacklisted; ++ const void *msgdigest; ++ unsigned int msgdigest_len; ++ unsigned int authattrs_len; ++ const void *authattrs; ++ long unsigned int aa_set; ++ time64_t signing_time; ++ struct public_key_signature *sig; ++}; ++ ++struct pkcs7_message___2 { ++ struct x509_certificate *certs; ++ struct x509_certificate *crl; ++ struct pkcs7_signed_info *signed_infos; ++ u8 version; ++ bool 
have_authattrs; ++ enum OID data_type; ++ size_t data_len; ++ size_t data_hdrlen; ++ const void *data; ++}; ++ ++struct pkcs7_parse_context { ++ struct pkcs7_message___2 *msg; ++ struct pkcs7_signed_info *sinfo; ++ struct pkcs7_signed_info **ppsinfo; ++ struct x509_certificate *certs; ++ struct x509_certificate **ppcerts; ++ long unsigned int data; ++ enum OID last_oid; ++ unsigned int x509_index; ++ unsigned int sinfo_index; ++ const void *raw_serial; ++ unsigned int raw_serial_size; ++ unsigned int raw_issuer_size; ++ const void *raw_issuer; ++ const void *raw_skid; ++ unsigned int raw_skid_size; ++ bool expect_skid; ++}; ++ ++struct mz_hdr { ++ uint16_t magic; ++ uint16_t lbsize; ++ uint16_t blocks; ++ uint16_t relocs; ++ uint16_t hdrsize; ++ uint16_t min_extra_pps; ++ uint16_t max_extra_pps; ++ uint16_t ss; ++ uint16_t sp; ++ uint16_t checksum; ++ uint16_t ip; ++ uint16_t cs; ++ uint16_t reloc_table_offset; ++ uint16_t overlay_num; ++ uint16_t reserved0[4]; ++ uint16_t oem_id; ++ uint16_t oem_info; ++ uint16_t reserved1[10]; ++ uint32_t peaddr; ++ char message[64]; ++}; ++ ++struct pe_hdr { ++ uint32_t magic; ++ uint16_t machine; ++ uint16_t sections; ++ uint32_t timestamp; ++ uint32_t symbol_table; ++ uint32_t symbols; ++ uint16_t opt_hdr_size; ++ uint16_t flags; ++}; ++ ++struct pe32_opt_hdr { ++ uint16_t magic; ++ uint8_t ld_major; ++ uint8_t ld_minor; ++ uint32_t text_size; ++ uint32_t data_size; ++ uint32_t bss_size; ++ uint32_t entry_point; ++ uint32_t code_base; ++ uint32_t data_base; ++ uint32_t image_base; ++ uint32_t section_align; ++ uint32_t file_align; ++ uint16_t os_major; ++ uint16_t os_minor; ++ uint16_t image_major; ++ uint16_t image_minor; ++ uint16_t subsys_major; ++ uint16_t subsys_minor; ++ uint32_t win32_version; ++ uint32_t image_size; ++ uint32_t header_size; ++ uint32_t csum; ++ uint16_t subsys; ++ uint16_t dll_flags; ++ uint32_t stack_size_req; ++ uint32_t stack_size; ++ uint32_t heap_size_req; ++ uint32_t heap_size; ++ uint32_t loader_flags; ++ uint32_t data_dirs; ++}; ++ ++struct pe32plus_opt_hdr { ++ uint16_t magic; ++ uint8_t ld_major; ++ uint8_t ld_minor; ++ uint32_t text_size; ++ uint32_t data_size; ++ uint32_t bss_size; ++ uint32_t entry_point; ++ uint32_t code_base; ++ uint64_t image_base; ++ uint32_t section_align; ++ uint32_t file_align; ++ uint16_t os_major; ++ uint16_t os_minor; ++ uint16_t image_major; ++ uint16_t image_minor; ++ uint16_t subsys_major; ++ uint16_t subsys_minor; ++ uint32_t win32_version; ++ uint32_t image_size; ++ uint32_t header_size; ++ uint32_t csum; ++ uint16_t subsys; ++ uint16_t dll_flags; ++ uint64_t stack_size_req; ++ uint64_t stack_size; ++ uint64_t heap_size_req; ++ uint64_t heap_size; ++ uint32_t loader_flags; ++ uint32_t data_dirs; ++}; ++ ++struct data_dirent { ++ uint32_t virtual_address; ++ uint32_t size; ++}; ++ ++struct data_directory { ++ struct data_dirent exports; ++ struct data_dirent imports; ++ struct data_dirent resources; ++ struct data_dirent exceptions; ++ struct data_dirent certs; ++ struct data_dirent base_relocations; ++ struct data_dirent debug; ++ struct data_dirent arch; ++ struct data_dirent global_ptr; ++ struct data_dirent tls; ++ struct data_dirent load_config; ++ struct data_dirent bound_imports; ++ struct data_dirent import_addrs; ++ struct data_dirent delay_imports; ++ struct data_dirent clr_runtime_hdr; ++ struct data_dirent reserved; ++}; ++ ++struct section_header { ++ char name[8]; ++ uint32_t virtual_size; ++ uint32_t virtual_address; ++ uint32_t raw_data_size; ++ uint32_t data_addr; ++ 
uint32_t relocs; ++ uint32_t line_numbers; ++ uint16_t num_relocs; ++ uint16_t num_lin_numbers; ++ uint32_t flags; ++}; ++ ++struct win_certificate { ++ uint32_t length; ++ uint16_t revision; ++ uint16_t cert_type; ++}; ++ ++struct pefile_context { ++ unsigned int header_size; ++ unsigned int image_checksum_offset; ++ unsigned int cert_dirent_offset; ++ unsigned int n_data_dirents; ++ unsigned int n_sections; ++ unsigned int certs_size; ++ unsigned int sig_offset; ++ unsigned int sig_len; ++ const struct section_header *secs; ++ const void *digest; ++ unsigned int digest_len; ++ const char *digest_algo; ++}; ++ ++enum mscode_actions { ++ ACT_mscode_note_content_type = 0, ++ ACT_mscode_note_digest = 1, ++ ACT_mscode_note_digest_algo = 2, ++ NR__mscode_actions = 3, ++}; ++ ++enum hash_algo { ++ HASH_ALGO_MD4 = 0, ++ HASH_ALGO_MD5 = 1, ++ HASH_ALGO_SHA1 = 2, ++ HASH_ALGO_RIPE_MD_160 = 3, ++ HASH_ALGO_SHA256 = 4, ++ HASH_ALGO_SHA384 = 5, ++ HASH_ALGO_SHA512 = 6, ++ HASH_ALGO_SHA224 = 7, ++ HASH_ALGO_RIPE_MD_128 = 8, ++ HASH_ALGO_RIPE_MD_256 = 9, ++ HASH_ALGO_RIPE_MD_320 = 10, ++ HASH_ALGO_WP_256 = 11, ++ HASH_ALGO_WP_384 = 12, ++ HASH_ALGO_WP_512 = 13, ++ HASH_ALGO_TGR_128 = 14, ++ HASH_ALGO_TGR_160 = 15, ++ HASH_ALGO_TGR_192 = 16, ++ HASH_ALGO_SM3_256 = 17, ++ HASH_ALGO__LAST = 18, ++}; ++ ++enum xen_domain_type { ++ XEN_NATIVE = 0, ++ XEN_PV_DOMAIN = 1, ++ XEN_HVM_DOMAIN = 2, ++}; ++ ++struct biovec_slab { ++ int nr_vecs; ++ char *name; ++ struct kmem_cache *slab; ++}; ++ ++enum rq_qos_id { ++ RQ_QOS_WBT = 0, ++ RQ_QOS_CGROUP = 1, ++}; ++ ++struct rq_qos_ops; ++ ++struct rq_qos { ++ struct rq_qos_ops *ops; ++ struct request_queue *q; ++ enum rq_qos_id id; ++ struct rq_qos *next; ++}; ++ ++struct rq_map_data { ++ struct page **pages; ++ int page_order; ++ int nr_entries; ++ long unsigned int offset; ++ int null_mapped; ++ int from_user; ++}; ++ ++struct rq_qos_ops { ++ void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *); ++ void (*track)(struct rq_qos *, struct request *, struct bio *); ++ void (*issue)(struct rq_qos *, struct request *); ++ void (*requeue)(struct rq_qos *, struct request *); ++ void (*done)(struct rq_qos *, struct request *); ++ void (*done_bio)(struct rq_qos *, struct bio *); ++ void (*cleanup)(struct rq_qos *, struct bio *); ++ void (*exit)(struct rq_qos *); ++}; ++ ++struct bio_slab { ++ struct kmem_cache *slab; ++ unsigned int slab_ref; ++ unsigned int slab_size; ++ char name[8]; ++}; ++ ++struct bio_map_data { ++ int is_our_pages; ++ struct iov_iter iter; ++ struct iovec iov[0]; ++}; ++ ++enum { ++ ELV_MQUEUE_MAY = 0, ++ ELV_MQUEUE_NO = 1, ++ ELV_MQUEUE_MUST = 2, ++}; ++ ++enum { ++ BLK_MQ_F_SHOULD_MERGE = 1, ++ BLK_MQ_F_TAG_SHARED = 2, ++ BLK_MQ_F_SG_MERGE = 4, ++ BLK_MQ_F_BLOCKING = 32, ++ BLK_MQ_F_NO_SCHED = 64, ++ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, ++ BLK_MQ_F_ALLOC_POLICY_BITS = 1, ++ BLK_MQ_S_STOPPED = 0, ++ BLK_MQ_S_TAG_ACTIVE = 1, ++ BLK_MQ_S_SCHED_RESTART = 2, ++ BLK_MQ_MAX_DEPTH = 10240, ++ BLK_MQ_CPU_WORK_BATCH = 8, ++}; ++ ++enum { ++ WBT_RWQ_BG = 0, ++ WBT_RWQ_KSWAPD = 1, ++ WBT_RWQ_DISCARD = 2, ++ WBT_NUM_RWQ = 3, ++}; ++ ++enum { ++ PERCPU_REF_INIT_ATOMIC = 1, ++ PERCPU_REF_INIT_DEAD = 2, ++}; ++ ++enum { ++ BLKPREP_OK = 0, ++ BLKPREP_KILL = 1, ++ BLKPREP_DEFER = 2, ++ BLKPREP_INVALID = 3, ++}; ++ ++struct req_iterator { ++ struct bvec_iter iter; ++ struct bio *bio; ++}; ++ ++struct blk_plug_cb; ++ ++typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); ++ ++struct blk_plug_cb { ++ struct list_head list; ++ blk_plug_cb_fn callback; ++ 
void *data; ++}; ++ ++enum { ++ BLK_MQ_REQ_NOWAIT = 1, ++ BLK_MQ_REQ_RESERVED = 2, ++ BLK_MQ_REQ_INTERNAL = 4, ++ BLK_MQ_REQ_PREEMPT = 8, ++}; ++ ++struct trace_event_raw_block_buffer { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ size_t size; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_rq_requeue { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ char rwbs[8]; ++ u32 __data_loc_cmd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_rq_complete { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ int error; ++ char rwbs[8]; ++ u32 __data_loc_cmd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_rq { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ unsigned int bytes; ++ char rwbs[8]; ++ char comm[16]; ++ u32 __data_loc_cmd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_bio_bounce { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ char rwbs[8]; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_bio_complete { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ int error; ++ char rwbs[8]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_bio_merge { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ char rwbs[8]; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_bio_queue { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ char rwbs[8]; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_get_rq { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ char rwbs[8]; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_plug { ++ struct trace_entry ent; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_unplug { ++ struct trace_entry ent; ++ int nr_rq; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_split { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ sector_t new_sector; ++ char rwbs[8]; ++ char comm[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_bio_remap { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ dev_t old_dev; ++ sector_t old_sector; ++ char rwbs[8]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_block_rq_remap { ++ struct trace_entry ent; ++ dev_t dev; ++ sector_t sector; ++ unsigned int nr_sector; ++ dev_t old_dev; ++ sector_t old_sector; ++ unsigned int nr_bios; ++ char rwbs[8]; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_block_buffer {}; ++ ++struct trace_event_data_offsets_block_rq_requeue { ++ u32 cmd; ++}; ++ ++struct trace_event_data_offsets_block_rq_complete { ++ u32 cmd; ++}; ++ ++struct trace_event_data_offsets_block_rq { ++ u32 cmd; ++}; ++ ++struct trace_event_data_offsets_block_bio_bounce {}; ++ ++struct trace_event_data_offsets_block_bio_complete {}; ++ ++struct trace_event_data_offsets_block_bio_merge {}; ++ ++struct trace_event_data_offsets_block_bio_queue {}; ++ ++struct trace_event_data_offsets_block_get_rq {}; ++ ++struct trace_event_data_offsets_block_plug {}; ++ ++struct trace_event_data_offsets_block_unplug {}; ++ ++struct trace_event_data_offsets_block_split {}; ++ ++struct trace_event_data_offsets_block_bio_remap {}; ++ ++struct 
trace_event_data_offsets_block_rq_remap {}; ++ ++struct queue_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct request_queue *, char *); ++ ssize_t (*store)(struct request_queue *, const char *, size_t); ++}; ++ ++enum { ++ REQ_FSEQ_PREFLUSH = 1, ++ REQ_FSEQ_DATA = 2, ++ REQ_FSEQ_POSTFLUSH = 4, ++ REQ_FSEQ_DONE = 8, ++ REQ_FSEQ_ACTIONS = 7, ++ FLUSH_PENDING_TIMEOUT = 1250, ++}; ++ ++enum blk_default_limits { ++ BLK_MAX_SEGMENTS = 128, ++ BLK_SAFE_MAX_SECTORS = 255, ++ BLK_DEF_MAX_SECTORS = 2560, ++ BLK_MAX_SEGMENT_SIZE = 65536, ++ BLK_SEG_BOUNDARY_MASK = 4294967295, ++}; ++ ++enum { ++ ICQ_EXITED = 4, ++ ICQ_DESTROYED = 8, ++}; ++ ++typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); ++ ++enum { ++ BLK_MQ_UNIQUE_TAG_BITS = 16, ++ BLK_MQ_UNIQUE_TAG_MASK = 65535, ++}; ++ ++enum { ++ BLK_MQ_TAG_FAIL = 4294967295, ++ BLK_MQ_TAG_MIN = 1, ++ BLK_MQ_TAG_MAX = 4294967294, ++}; ++ ++struct mq_inflight { ++ struct hd_struct *part; ++ unsigned int *inflight; ++}; ++ ++struct flush_busy_ctx_data { ++ struct blk_mq_hw_ctx *hctx; ++ struct list_head *list; ++}; ++ ++struct dispatch_rq_data { ++ struct blk_mq_hw_ctx *hctx; ++ struct request *rq; ++}; ++ ++struct blk_mq_qe_pair { ++ struct list_head node; ++ struct request_queue *q; ++ struct elevator_type *type; ++}; ++ ++typedef void busy_iter_fn(struct blk_mq_hw_ctx *, struct request *, void *, bool); ++ ++typedef void busy_tag_iter_fn(struct request *, void *, bool); ++ ++struct bt_iter_data { ++ struct blk_mq_hw_ctx *hctx; ++ busy_iter_fn *fn; ++ void *data; ++ bool reserved; ++ bool inflight; ++}; ++ ++struct bt_tags_iter_data { ++ struct blk_mq_tags *tags; ++ busy_tag_iter_fn *fn; ++ void *data; ++ bool reserved; ++}; ++ ++struct blk_queue_stats { ++ struct list_head callbacks; ++ spinlock_t lock; ++ bool enable_accounting; ++}; ++ ++struct blk_mq_ctx_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct blk_mq_ctx *, char *); ++ ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); ++}; ++ ++struct blk_mq_hw_ctx_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct blk_mq_hw_ctx *, char *); ++ ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); ++}; ++ ++struct disk_part_iter { ++ struct gendisk *disk; ++ struct hd_struct *part; ++ int idx; ++ unsigned int flags; ++}; ++ ++struct hd_geometry { ++ unsigned char heads; ++ unsigned char sectors; ++ short unsigned int cylinders; ++ long unsigned int start; ++}; ++ ++struct blkpg_ioctl_arg { ++ int op; ++ int flags; ++ int datalen; ++ void *data; ++}; ++ ++struct blkpg_partition { ++ long long int start; ++ long long int length; ++ int pno; ++ char devname[64]; ++ char volname[64]; ++}; ++ ++struct pr_reservation { ++ __u64 key; ++ __u32 type; ++ __u32 flags; ++}; ++ ++struct pr_registration { ++ __u64 old_key; ++ __u64 new_key; ++ __u32 flags; ++ __u32 __pad; ++}; ++ ++struct pr_preempt { ++ __u64 old_key; ++ __u64 new_key; ++ __u32 type; ++ __u32 flags; ++}; ++ ++struct pr_clear { ++ __u64 key; ++ __u32 flags; ++ __u32 __pad; ++}; ++ ++struct klist { ++ spinlock_t k_lock; ++ struct list_head k_list; ++ void (*get)(struct klist_node *); ++ void (*put)(struct klist_node *); ++}; ++ ++struct klist_iter { ++ struct klist *i_klist; ++ struct klist_node *i_cur; ++}; ++ ++struct class_dev_iter { ++ struct klist_iter ki; ++ const struct device_type *type; ++}; ++ ++struct disk_events { ++ struct list_head node; ++ struct gendisk *disk; ++ spinlock_t lock; ++ struct mutex block_mutex; ++ int block; ++ unsigned int pending; ++ 
unsigned int clearing; ++ long int poll_msecs; ++ struct delayed_work dwork; ++}; ++ ++struct blk_major_name { ++ struct blk_major_name *next; ++ int major; ++ char name[16]; ++}; ++ ++typedef struct { ++ struct page *v; ++} Sector; ++ ++struct parsed_partitions { ++ struct block_device *bdev; ++ char name[32]; ++ struct { ++ sector_t from; ++ sector_t size; ++ int flags; ++ bool has_info; ++ struct partition_meta_info info; ++ } *parts; ++ int next; ++ int limit; ++ bool access_beyond_eod; ++ char *pp_buf; ++}; ++ ++enum { ++ IOPRIO_WHO_PROCESS = 1, ++ IOPRIO_WHO_PGRP = 2, ++ IOPRIO_WHO_USER = 3, ++}; ++ ++struct RigidDiskBlock { ++ __u32 rdb_ID; ++ __be32 rdb_SummedLongs; ++ __s32 rdb_ChkSum; ++ __u32 rdb_HostID; ++ __be32 rdb_BlockBytes; ++ __u32 rdb_Flags; ++ __u32 rdb_BadBlockList; ++ __be32 rdb_PartitionList; ++ __u32 rdb_FileSysHeaderList; ++ __u32 rdb_DriveInit; ++ __u32 rdb_Reserved1[6]; ++ __u32 rdb_Cylinders; ++ __u32 rdb_Sectors; ++ __u32 rdb_Heads; ++ __u32 rdb_Interleave; ++ __u32 rdb_Park; ++ __u32 rdb_Reserved2[3]; ++ __u32 rdb_WritePreComp; ++ __u32 rdb_ReducedWrite; ++ __u32 rdb_StepRate; ++ __u32 rdb_Reserved3[5]; ++ __u32 rdb_RDBBlocksLo; ++ __u32 rdb_RDBBlocksHi; ++ __u32 rdb_LoCylinder; ++ __u32 rdb_HiCylinder; ++ __u32 rdb_CylBlocks; ++ __u32 rdb_AutoParkSeconds; ++ __u32 rdb_HighRDSKBlock; ++ __u32 rdb_Reserved4; ++ char rdb_DiskVendor[8]; ++ char rdb_DiskProduct[16]; ++ char rdb_DiskRevision[4]; ++ char rdb_ControllerVendor[8]; ++ char rdb_ControllerProduct[16]; ++ char rdb_ControllerRevision[4]; ++ __u32 rdb_Reserved5[10]; ++}; ++ ++struct PartitionBlock { ++ __be32 pb_ID; ++ __be32 pb_SummedLongs; ++ __s32 pb_ChkSum; ++ __u32 pb_HostID; ++ __be32 pb_Next; ++ __u32 pb_Flags; ++ __u32 pb_Reserved1[2]; ++ __u32 pb_DevFlags; ++ __u8 pb_DriveName[32]; ++ __u32 pb_Reserved2[15]; ++ __be32 pb_Environment[17]; ++ __u32 pb_EReserved[15]; ++}; ++ ++struct mac_partition { ++ __be16 signature; ++ __be16 res1; ++ __be32 map_count; ++ __be32 start_block; ++ __be32 block_count; ++ char name[32]; ++ char type[32]; ++ __be32 data_start; ++ __be32 data_count; ++ __be32 status; ++ __be32 boot_start; ++ __be32 boot_size; ++ __be32 boot_load; ++ __be32 boot_load2; ++ __be32 boot_entry; ++ __be32 boot_entry2; ++ __be32 boot_cksum; ++ char processor[16]; ++}; ++ ++struct mac_driver_desc { ++ __be16 signature; ++ __be16 block_size; ++ __be32 block_count; ++}; ++ ++struct fat_boot_sector { ++ __u8 ignored[3]; ++ __u8 system_id[8]; ++ __u8 sector_size[2]; ++ __u8 sec_per_clus; ++ __le16 reserved; ++ __u8 fats; ++ __u8 dir_entries[2]; ++ __u8 sectors[2]; ++ __u8 media; ++ __le16 fat_length; ++ __le16 secs_track; ++ __le16 heads; ++ __le32 hidden; ++ __le32 total_sect; ++ union { ++ struct { ++ __u8 drive_number; ++ __u8 state; ++ __u8 signature; ++ __u8 vol_id[4]; ++ __u8 vol_label[11]; ++ __u8 fs_type[8]; ++ } fat16; ++ struct { ++ __le32 length; ++ __le16 flags; ++ __u8 version[2]; ++ __le32 root_cluster; ++ __le16 info_sector; ++ __le16 backup_boot; ++ __le16 reserved2[6]; ++ __u8 drive_number; ++ __u8 state; ++ __u8 signature; ++ __u8 vol_id[4]; ++ __u8 vol_label[11]; ++ __u8 fs_type[8]; ++ } fat32; ++ }; ++}; ++ ++enum { ++ DOS_EXTENDED_PARTITION = 5, ++ LINUX_EXTENDED_PARTITION = 133, ++ WIN98_EXTENDED_PARTITION = 15, ++ SUN_WHOLE_DISK = 5, ++ LINUX_SWAP_PARTITION = 130, ++ LINUX_DATA_PARTITION = 131, ++ LINUX_LVM_PARTITION = 142, ++ LINUX_RAID_PARTITION = 253, ++ SOLARIS_X86_PARTITION = 130, ++ NEW_SOLARIS_X86_PARTITION = 191, ++ DM6_AUX1PARTITION = 81, ++ DM6_AUX3PARTITION = 83, ++ 
DM6_PARTITION = 84, ++ EZD_PARTITION = 85, ++ FREEBSD_PARTITION = 165, ++ OPENBSD_PARTITION = 166, ++ NETBSD_PARTITION = 169, ++ BSDI_PARTITION = 183, ++ MINIX_PARTITION = 129, ++ UNIXWARE_PARTITION = 99, ++}; ++ ++struct partition { ++ unsigned char boot_ind; ++ unsigned char head; ++ unsigned char sector; ++ unsigned char cyl; ++ unsigned char sys_ind; ++ unsigned char end_head; ++ unsigned char end_sector; ++ unsigned char end_cyl; ++ __le32 start_sect; ++ __le32 nr_sects; ++}; ++ ++struct solaris_x86_slice { ++ __le16 s_tag; ++ __le16 s_flag; ++ __le32 s_start; ++ __le32 s_size; ++}; ++ ++struct solaris_x86_vtoc { ++ unsigned int v_bootinfo[3]; ++ __le32 v_sanity; ++ __le32 v_version; ++ char v_volume[8]; ++ __le16 v_sectorsz; ++ __le16 v_nparts; ++ unsigned int v_reserved[10]; ++ struct solaris_x86_slice v_slice[16]; ++ unsigned int timestamp[16]; ++ char v_asciilabel[128]; ++}; ++ ++struct bsd_partition { ++ __le32 p_size; ++ __le32 p_offset; ++ __le32 p_fsize; ++ __u8 p_fstype; ++ __u8 p_frag; ++ __le16 p_cpg; ++}; ++ ++struct bsd_disklabel { ++ __le32 d_magic; ++ __s16 d_type; ++ __s16 d_subtype; ++ char d_typename[16]; ++ char d_packname[16]; ++ __u32 d_secsize; ++ __u32 d_nsectors; ++ __u32 d_ntracks; ++ __u32 d_ncylinders; ++ __u32 d_secpercyl; ++ __u32 d_secperunit; ++ __u16 d_sparespertrack; ++ __u16 d_sparespercyl; ++ __u32 d_acylinders; ++ __u16 d_rpm; ++ __u16 d_interleave; ++ __u16 d_trackskew; ++ __u16 d_cylskew; ++ __u32 d_headswitch; ++ __u32 d_trkseek; ++ __u32 d_flags; ++ __u32 d_drivedata[5]; ++ __u32 d_spare[5]; ++ __le32 d_magic2; ++ __le16 d_checksum; ++ __le16 d_npartitions; ++ __le32 d_bbsize; ++ __le32 d_sbsize; ++ struct bsd_partition d_partitions[16]; ++}; ++ ++struct unixware_slice { ++ __le16 s_label; ++ __le16 s_flags; ++ __le32 start_sect; ++ __le32 nr_sects; ++}; ++ ++struct unixware_vtoc { ++ __le32 v_magic; ++ __le32 v_version; ++ char v_name[8]; ++ __le16 v_nslices; ++ __le16 v_unknown1; ++ __le32 v_reserved[10]; ++ struct unixware_slice v_slice[16]; ++}; ++ ++struct unixware_disklabel { ++ __le32 d_type; ++ __le32 d_magic; ++ __le32 d_version; ++ char d_serial[12]; ++ __le32 d_ncylinders; ++ __le32 d_ntracks; ++ __le32 d_nsectors; ++ __le32 d_secsize; ++ __le32 d_part_start; ++ __le32 d_unknown1[12]; ++ __le32 d_alt_tbl; ++ __le32 d_alt_len; ++ __le32 d_phys_cyl; ++ __le32 d_phys_trk; ++ __le32 d_phys_sec; ++ __le32 d_phys_bytes; ++ __le32 d_unknown2; ++ __le32 d_unknown3; ++ __le32 d_pad[8]; ++ struct unixware_vtoc vtoc; ++}; ++ ++struct d_partition { ++ __le32 p_size; ++ __le32 p_offset; ++ __le32 p_fsize; ++ u8 p_fstype; ++ u8 p_frag; ++ __le16 p_cpg; ++}; ++ ++struct disklabel { ++ __le32 d_magic; ++ __le16 d_type; ++ __le16 d_subtype; ++ u8 d_typename[16]; ++ u8 d_packname[16]; ++ __le32 d_secsize; ++ __le32 d_nsectors; ++ __le32 d_ntracks; ++ __le32 d_ncylinders; ++ __le32 d_secpercyl; ++ __le32 d_secprtunit; ++ __le16 d_sparespertrack; ++ __le16 d_sparespercyl; ++ __le32 d_acylinders; ++ __le16 d_rpm; ++ __le16 d_interleave; ++ __le16 d_trackskew; ++ __le16 d_cylskew; ++ __le32 d_headswitch; ++ __le32 d_trkseek; ++ __le32 d_flags; ++ __le32 d_drivedata[5]; ++ __le32 d_spare[5]; ++ __le32 d_magic2; ++ __le16 d_checksum; ++ __le16 d_npartitions; ++ __le32 d_bbsize; ++ __le32 d_sbsize; ++ struct d_partition d_partitions[18]; ++}; ++ ++struct sgi_volume { ++ s8 name[8]; ++ __be32 block_num; ++ __be32 num_bytes; ++}; ++ ++struct sgi_partition { ++ __be32 num_blocks; ++ __be32 first_block; ++ __be32 type; ++}; ++ ++struct sgi_disklabel { ++ __be32 
magic_mushroom; ++ __be16 root_part_num; ++ __be16 swap_part_num; ++ s8 boot_file[16]; ++ u8 _unused0[48]; ++ struct sgi_volume volume[15]; ++ struct sgi_partition partitions[16]; ++ __be32 csum; ++ __be32 _unused1; ++}; ++ ++struct sun_info { ++ __be16 id; ++ __be16 flags; ++}; ++ ++struct sun_vtoc { ++ __be32 version; ++ char volume[8]; ++ __be16 nparts; ++ struct sun_info infos[8]; ++ __be16 padding; ++ __be32 bootinfo[3]; ++ __be32 sanity; ++ __be32 reserved[10]; ++ __be32 timestamp[8]; ++}; ++ ++struct sun_partition { ++ __be32 start_cylinder; ++ __be32 num_sectors; ++}; ++ ++struct sun_disklabel { ++ unsigned char info[128]; ++ struct sun_vtoc vtoc; ++ __be32 write_reinstruct; ++ __be32 read_reinstruct; ++ unsigned char spare[148]; ++ __be16 rspeed; ++ __be16 pcylcount; ++ __be16 sparecyl; ++ __be16 obs1; ++ __be16 obs2; ++ __be16 ilfact; ++ __be16 ncyl; ++ __be16 nacyl; ++ __be16 ntrks; ++ __be16 nsect; ++ __be16 obs3; ++ __be16 obs4; ++ struct sun_partition partitions[8]; ++ __be16 magic; ++ __be16 csum; ++}; ++ ++struct _gpt_header { ++ __le64 signature; ++ __le32 revision; ++ __le32 header_size; ++ __le32 header_crc32; ++ __le32 reserved1; ++ __le64 my_lba; ++ __le64 alternate_lba; ++ __le64 first_usable_lba; ++ __le64 last_usable_lba; ++ efi_guid_t disk_guid; ++ __le64 partition_entry_lba; ++ __le32 num_partition_entries; ++ __le32 sizeof_partition_entry; ++ __le32 partition_entry_array_crc32; ++} __attribute__((packed)); ++ ++typedef struct _gpt_header gpt_header; ++ ++struct _gpt_entry_attributes { ++ u64 required_to_function: 1; ++ u64 reserved: 47; ++ u64 type_guid_specific: 16; ++}; ++ ++typedef struct _gpt_entry_attributes gpt_entry_attributes; ++ ++struct _gpt_entry { ++ efi_guid_t partition_type_guid; ++ efi_guid_t unique_partition_guid; ++ __le64 starting_lba; ++ __le64 ending_lba; ++ gpt_entry_attributes attributes; ++ __le16 partition_name[36]; ++}; ++ ++typedef struct _gpt_entry gpt_entry; ++ ++struct _gpt_mbr_record { ++ u8 boot_indicator; ++ u8 start_head; ++ u8 start_sector; ++ u8 start_track; ++ u8 os_type; ++ u8 end_head; ++ u8 end_sector; ++ u8 end_track; ++ __le32 starting_lba; ++ __le32 size_in_lba; ++}; ++ ++typedef struct _gpt_mbr_record gpt_mbr_record; ++ ++struct _legacy_mbr { ++ u8 boot_code[440]; ++ __le32 unique_mbr_signature; ++ __le16 unknown; ++ gpt_mbr_record partition_record[4]; ++ __le16 signature; ++} __attribute__((packed)); ++ ++typedef struct _legacy_mbr legacy_mbr; ++ ++struct d_partition___2 { ++ __le32 p_res; ++ u8 p_fstype; ++ u8 p_res2[3]; ++ __le32 p_offset; ++ __le32 p_size; ++}; ++ ++struct disklabel___2 { ++ u8 d_reserved[270]; ++ struct d_partition___2 d_partitions[2]; ++ u8 d_blank[208]; ++ __le16 d_magic; ++} __attribute__((packed)); ++ ++struct rq_wait { ++ wait_queue_head_t wait; ++ atomic_t inflight; ++}; ++ ++struct rq_depth { ++ unsigned int max_depth; ++ int scale_step; ++ bool scaled_max; ++ unsigned int queue_depth; ++ unsigned int default_depth; ++}; ++ ++struct request_sense; ++ ++struct cdrom_generic_command { ++ unsigned char cmd[12]; ++ unsigned char *buffer; ++ unsigned int buflen; ++ int stat; ++ struct request_sense *sense; ++ unsigned char data_direction; ++ int quiet; ++ int timeout; ++ void *reserved[1]; ++}; ++ ++struct request_sense { ++ __u8 error_code: 7; ++ __u8 valid: 1; ++ __u8 segment_number; ++ __u8 sense_key: 4; ++ __u8 reserved2: 1; ++ __u8 ili: 1; ++ __u8 reserved1: 2; ++ __u8 information[4]; ++ __u8 add_sense_len; ++ __u8 command_info[4]; ++ __u8 asc; ++ __u8 ascq; ++ __u8 fruc; ++ __u8 sks[3]; ++ 
__u8 asb[46]; ++}; ++ ++struct scsi_ioctl_command { ++ unsigned int inlen; ++ unsigned int outlen; ++ unsigned char data[0]; ++}; ++ ++enum scsi_device_event { ++ SDEV_EVT_MEDIA_CHANGE = 1, ++ SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, ++ SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, ++ SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, ++ SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, ++ SDEV_EVT_LUN_CHANGE_REPORTED = 6, ++ SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, ++ SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, ++ SDEV_EVT_FIRST = 1, ++ SDEV_EVT_LAST = 8, ++ SDEV_EVT_MAXBITS = 9, ++}; ++ ++struct scsi_request { ++ unsigned char __cmd[16]; ++ unsigned char *cmd; ++ short unsigned int cmd_len; ++ int result; ++ unsigned int sense_len; ++ unsigned int resid_len; ++ int retries; ++ void *sense; ++}; ++ ++struct blk_cmd_filter { ++ long unsigned int read_ok[4]; ++ long unsigned int write_ok[4]; ++}; ++ ++enum { ++ OMAX_SB_LEN = 16, ++}; ++ ++struct bsg_device { ++ struct request_queue *queue; ++ spinlock_t lock; ++ struct hlist_node dev_list; ++ refcount_t ref_count; ++ char name[20]; ++ int max_queue; ++}; ++ ++struct bsg_buffer { ++ unsigned int payload_len; ++ int sg_cnt; ++ struct scatterlist *sg_list; ++}; ++ ++struct bsg_job { ++ struct device *dev; ++ struct kref kref; ++ unsigned int timeout; ++ void *request; ++ void *reply; ++ unsigned int request_len; ++ unsigned int reply_len; ++ struct bsg_buffer request_payload; ++ struct bsg_buffer reply_payload; ++ int result; ++ unsigned int reply_payload_rcv_len; ++ void *dd_data; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++}; ++ ++struct blkg_stat { ++ struct percpu_counter cpu_cnt; ++ atomic64_t aux_cnt; ++}; ++ ++typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); ++ ++typedef void blkcg_pol_init_cpd_fn(struct blkcg_policy_data *); ++ ++typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); ++ ++typedef void blkcg_pol_bind_cpd_fn(struct blkcg_policy_data *); ++ ++typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(gfp_t, int); ++ ++typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); ++ ++typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); ++ ++typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); ++ ++typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); ++ ++typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); ++ ++typedef size_t blkcg_pol_stat_pd_fn(struct blkg_policy_data *, char *, size_t); ++ ++struct blkcg_policy { ++ int plid; ++ struct cftype *dfl_cftypes; ++ struct cftype *legacy_cftypes; ++ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; ++ blkcg_pol_init_cpd_fn *cpd_init_fn; ++ blkcg_pol_free_cpd_fn *cpd_free_fn; ++ blkcg_pol_bind_cpd_fn *cpd_bind_fn; ++ blkcg_pol_alloc_pd_fn *pd_alloc_fn; ++ blkcg_pol_init_pd_fn *pd_init_fn; ++ blkcg_pol_online_pd_fn *pd_online_fn; ++ blkcg_pol_offline_pd_fn *pd_offline_fn; ++ blkcg_pol_free_pd_fn *pd_free_fn; ++ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; ++ blkcg_pol_stat_pd_fn *pd_stat_fn; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct blkg_conf_ctx { ++ struct gendisk *disk; ++ struct blkcg_gq *blkg; ++ char *body; ++}; ++ ++struct throtl_service_queue { ++ struct throtl_service_queue *parent_sq; ++ struct list_head queued[2]; ++ unsigned int nr_queued[2]; ++ struct rb_root pending_tree; ++ struct rb_node *first_pending; ++ unsigned int nr_pending; ++ long unsigned int first_pending_disptime; 
++ struct timer_list pending_timer; ++}; ++ ++struct latency_bucket { ++ long unsigned int total_latency; ++ int samples; ++}; ++ ++struct avg_latency_bucket { ++ long unsigned int latency; ++ bool valid; ++}; ++ ++struct throtl_data { ++ struct throtl_service_queue service_queue; ++ struct request_queue *queue; ++ unsigned int nr_queued[2]; ++ unsigned int throtl_slice; ++ struct work_struct dispatch_work; ++ unsigned int limit_index; ++ bool limit_valid[2]; ++ long unsigned int low_upgrade_time; ++ long unsigned int low_downgrade_time; ++ unsigned int scale; ++ struct latency_bucket tmp_buckets[18]; ++ struct avg_latency_bucket avg_buckets[18]; ++ struct latency_bucket *latency_buckets[2]; ++ long unsigned int last_calculate_time; ++ long unsigned int filtered_latency; ++ bool track_bio_latency; ++}; ++ ++struct throtl_grp; ++ ++struct throtl_qnode { ++ struct list_head node; ++ struct bio_list bios; ++ struct throtl_grp *tg; ++}; ++ ++struct throtl_grp { ++ struct blkg_policy_data pd; ++ struct rb_node rb_node; ++ struct throtl_data *td; ++ struct throtl_service_queue service_queue; ++ struct throtl_qnode qnode_on_self[2]; ++ struct throtl_qnode qnode_on_parent[2]; ++ long unsigned int disptime; ++ unsigned int flags; ++ bool has_rules[2]; ++ uint64_t bps[4]; ++ uint64_t bps_conf[4]; ++ unsigned int iops[4]; ++ unsigned int iops_conf[4]; ++ uint64_t bytes_disp[2]; ++ unsigned int io_disp[2]; ++ long unsigned int last_low_overflow_time[2]; ++ uint64_t last_bytes_disp[2]; ++ unsigned int last_io_disp[2]; ++ long unsigned int last_check_time; ++ long unsigned int latency_target; ++ long unsigned int latency_target_conf; ++ long unsigned int slice_start[2]; ++ long unsigned int slice_end[2]; ++ long unsigned int last_finish_time; ++ long unsigned int checked_last_finish_time; ++ long unsigned int avg_idletime; ++ long unsigned int idletime_threshold; ++ long unsigned int idletime_threshold_conf; ++ unsigned int bio_cnt; ++ unsigned int bad_bio_cnt; ++ long unsigned int bio_cnt_reset_time; ++}; ++ ++enum tg_state_flags { ++ THROTL_TG_PENDING = 1, ++ THROTL_TG_WAS_EMPTY = 2, ++}; ++ ++enum { ++ LIMIT_LOW = 0, ++ LIMIT_MAX = 1, ++ LIMIT_CNT = 2, ++}; ++ ++struct noop_data { ++ struct list_head queue; ++}; ++ ++struct deadline_data { ++ struct rb_root sort_list[2]; ++ struct list_head fifo_list[2]; ++ struct request *next_rq[2]; ++ unsigned int batching; ++ unsigned int starved; ++ int fifo_expire[2]; ++ int fifo_batch; ++ int writes_starved; ++ int front_merges; ++}; ++ ++struct cfq_ttime { ++ u64 last_end_request; ++ u64 ttime_total; ++ u64 ttime_mean; ++ long unsigned int ttime_samples; ++}; ++ ++struct cfq_rb_root { ++ struct rb_root_cached rb; ++ struct rb_node *rb_rightmost; ++ unsigned int count; ++ u64 min_vdisktime; ++ struct cfq_ttime ttime; ++}; ++ ++struct cfq_data; ++ ++struct cfq_group; ++ ++struct cfq_queue { ++ int ref; ++ unsigned int flags; ++ struct cfq_data *cfqd; ++ struct rb_node rb_node; ++ u64 rb_key; ++ struct rb_node p_node; ++ struct rb_root *p_root; ++ struct rb_root sort_list; ++ struct request *next_rq; ++ int queued[2]; ++ int allocated[2]; ++ struct list_head fifo; ++ u64 dispatch_start; ++ u64 allocated_slice; ++ u64 slice_dispatch; ++ u64 slice_start; ++ u64 slice_end; ++ s64 slice_resid; ++ int prio_pending; ++ int dispatched; ++ short unsigned int ioprio; ++ short unsigned int org_ioprio; ++ short unsigned int ioprio_class; ++ short unsigned int org_ioprio_class; ++ pid_t pid; ++ u32 seek_history; ++ sector_t last_request_pos; ++ struct cfq_rb_root 
*service_tree; ++ struct cfq_queue *new_cfqq; ++ struct cfq_group *cfqg; ++ long unsigned int nr_sectors; ++}; ++ ++enum wl_class_t { ++ BE_WORKLOAD = 0, ++ RT_WORKLOAD = 1, ++ IDLE_WORKLOAD = 2, ++ CFQ_PRIO_NR = 3, ++}; ++ ++enum wl_type_t { ++ ASYNC_WORKLOAD = 0, ++ SYNC_NOIDLE_WORKLOAD = 1, ++ SYNC_WORKLOAD = 2, ++}; ++ ++struct cfq_io_cq; ++ ++struct cfq_data { ++ struct request_queue *queue; ++ struct cfq_rb_root grp_service_tree; ++ struct cfq_group *root_group; ++ enum wl_class_t serving_wl_class; ++ enum wl_type_t serving_wl_type; ++ u64 workload_expires; ++ struct cfq_group *serving_group; ++ struct rb_root prio_trees[8]; ++ unsigned int busy_queues; ++ unsigned int busy_sync_queues; ++ int rq_in_driver; ++ int rq_in_flight[2]; ++ int rq_queued; ++ int hw_tag; ++ int hw_tag_est_depth; ++ unsigned int hw_tag_samples; ++ struct hrtimer idle_slice_timer; ++ struct work_struct unplug_work; ++ struct cfq_queue *active_queue; ++ struct cfq_io_cq *active_cic; ++ sector_t last_position; ++ unsigned int cfq_quantum; ++ unsigned int cfq_back_penalty; ++ unsigned int cfq_back_max; ++ unsigned int cfq_slice_async_rq; ++ unsigned int cfq_latency; ++ u64 cfq_fifo_expire[2]; ++ u64 cfq_slice[2]; ++ u64 cfq_slice_idle; ++ u64 cfq_group_idle; ++ u64 cfq_target_latency; ++ struct cfq_queue oom_cfqq; ++ u64 last_delayed_sync; ++}; ++ ++struct cfqg_stats { ++ struct blkg_rwstat merged; ++ struct blkg_rwstat service_time; ++ struct blkg_rwstat wait_time; ++ struct blkg_rwstat queued; ++ struct blkg_stat time; ++}; ++ ++struct cfq_group { ++ struct blkg_policy_data pd; ++ struct rb_node rb_node; ++ u64 vdisktime; ++ int nr_active; ++ unsigned int children_weight; ++ unsigned int vfraction; ++ unsigned int weight; ++ unsigned int new_weight; ++ unsigned int dev_weight; ++ unsigned int leaf_weight; ++ unsigned int new_leaf_weight; ++ unsigned int dev_leaf_weight; ++ int nr_cfqq; ++ unsigned int busy_queues_avg[3]; ++ struct cfq_rb_root service_trees[6]; ++ struct cfq_rb_root service_tree_idle; ++ u64 saved_wl_slice; ++ enum wl_type_t saved_wl_type; ++ enum wl_class_t saved_wl_class; ++ int dispatched; ++ struct cfq_ttime ttime; ++ struct cfqg_stats stats; ++ struct cfq_queue *async_cfqq[16]; ++ struct cfq_queue *async_idle_cfqq; ++}; ++ ++struct cfq_group_data { ++ struct blkcg_policy_data cpd; ++ unsigned int weight; ++ unsigned int leaf_weight; ++}; ++ ++struct cfq_io_cq { ++ struct io_cq icq; ++ struct cfq_queue *cfqq[2]; ++ struct cfq_ttime ttime; ++ int ioprio; ++ uint64_t blkcg_serial_nr; ++}; ++ ++enum cfqq_state_flags { ++ CFQ_CFQQ_FLAG_on_rr = 0, ++ CFQ_CFQQ_FLAG_wait_request = 1, ++ CFQ_CFQQ_FLAG_must_dispatch = 2, ++ CFQ_CFQQ_FLAG_must_alloc_slice = 3, ++ CFQ_CFQQ_FLAG_fifo_expire = 4, ++ CFQ_CFQQ_FLAG_idle_window = 5, ++ CFQ_CFQQ_FLAG_prio_changed = 6, ++ CFQ_CFQQ_FLAG_slice_new = 7, ++ CFQ_CFQQ_FLAG_sync = 8, ++ CFQ_CFQQ_FLAG_coop = 9, ++ CFQ_CFQQ_FLAG_split_coop = 10, ++ CFQ_CFQQ_FLAG_deep = 11, ++ CFQ_CFQQ_FLAG_wait_busy = 12, ++}; ++ ++struct deadline_data___2 { ++ struct rb_root sort_list[2]; ++ struct list_head fifo_list[2]; ++ struct request *next_rq[2]; ++ unsigned int batching; ++ unsigned int starved; ++ int fifo_expire[2]; ++ int fifo_batch; ++ int writes_starved; ++ int front_merges; ++ spinlock_t lock; ++ spinlock_t zone_lock; ++ struct list_head dispatch; ++}; ++ ++enum { ++ KYBER_READ = 0, ++ KYBER_SYNC_WRITE = 1, ++ KYBER_OTHER = 2, ++ KYBER_NUM_DOMAINS = 3, ++}; ++ ++enum { ++ KYBER_MIN_DEPTH = 256, ++ KYBER_ASYNC_PERCENT = 75, ++}; ++ ++struct kyber_ctx_queue { ++ spinlock_t 
lock; ++ struct list_head rq_list[3]; ++ long: 64; ++}; ++ ++struct kyber_queue_data { ++ struct request_queue *q; ++ struct blk_stat_callback *cb; ++ struct sbitmap_queue domain_tokens[3]; ++ unsigned int async_depth; ++ u64 read_lat_nsec; ++ u64 write_lat_nsec; ++}; ++ ++struct kyber_hctx_data { ++ spinlock_t lock; ++ struct list_head rqs[3]; ++ unsigned int cur_domain; ++ unsigned int batching; ++ struct kyber_ctx_queue *kcqs; ++ struct sbitmap kcq_map[3]; ++ wait_queue_entry_t domain_wait[3]; ++ struct sbq_wait_state *domain_ws[3]; ++ atomic_t wait_index[3]; ++}; ++ ++enum { ++ NONE = 0, ++ GOOD = 1, ++ GREAT = 2, ++ BAD = 4294967295, ++ AWFUL = 4294967294, ++}; ++ ++struct flush_kcq_data { ++ struct kyber_hctx_data *khd; ++ unsigned int sched_domain; ++ struct list_head *list; ++}; ++ ++struct bfq_entity; ++ ++struct bfq_service_tree { ++ struct rb_root active; ++ struct rb_root idle; ++ struct bfq_entity *first_idle; ++ struct bfq_entity *last_idle; ++ u64 vtime; ++ long unsigned int wsum; ++}; ++ ++struct bfq_weight_counter; ++ ++struct bfq_sched_data; ++ ++struct bfq_entity { ++ struct rb_node rb_node; ++ struct bfq_weight_counter *weight_counter; ++ bool on_st; ++ u64 start; ++ u64 finish; ++ struct rb_root *tree; ++ u64 min_start; ++ int service; ++ int budget; ++ int weight; ++ int new_weight; ++ int orig_weight; ++ struct bfq_entity *parent; ++ struct bfq_sched_data *my_sched_data; ++ struct bfq_sched_data *sched_data; ++ int prio_changed; ++}; ++ ++struct bfq_sched_data { ++ struct bfq_entity *in_service_entity; ++ struct bfq_entity *next_in_service; ++ struct bfq_service_tree service_tree[3]; ++ long unsigned int bfq_class_idle_last_service; ++}; ++ ++struct bfq_weight_counter { ++ unsigned int weight; ++ unsigned int num_active; ++ struct rb_node weights_node; ++}; ++ ++struct bfq_ttime { ++ u64 last_end_request; ++ u64 ttime_total; ++ long unsigned int ttime_samples; ++ u64 ttime_mean; ++}; ++ ++struct bfq_data; ++ ++struct bfq_io_cq; ++ ++struct bfq_queue { ++ int ref; ++ struct bfq_data *bfqd; ++ short unsigned int ioprio; ++ short unsigned int ioprio_class; ++ short unsigned int new_ioprio; ++ short unsigned int new_ioprio_class; ++ struct bfq_queue *new_bfqq; ++ struct rb_node pos_node; ++ struct rb_root *pos_root; ++ struct rb_root sort_list; ++ struct request *next_rq; ++ int queued[2]; ++ int allocated; ++ int meta_pending; ++ struct list_head fifo; ++ struct bfq_entity entity; ++ int max_budget; ++ long unsigned int budget_timeout; ++ int dispatched; ++ long unsigned int flags; ++ struct list_head bfqq_list; ++ struct bfq_ttime ttime; ++ u32 seek_history; ++ struct hlist_node burst_list_node; ++ sector_t last_request_pos; ++ unsigned int requests_within_timer; ++ pid_t pid; ++ struct bfq_io_cq *bic; ++ long unsigned int wr_cur_max_time; ++ long unsigned int soft_rt_next_start; ++ long unsigned int last_wr_start_finish; ++ unsigned int wr_coeff; ++ long unsigned int last_idle_bklogged; ++ long unsigned int service_from_backlogged; ++ long unsigned int service_from_wr; ++ long unsigned int wr_start_at_switch_to_srt; ++ long unsigned int split_time; ++ long unsigned int first_IO_time; ++ u32 max_service_rate; ++ unsigned int inject_coeff; ++ unsigned int injected_service; ++}; ++ ++struct bfq_group; ++ ++struct bfq_data { ++ struct request_queue *queue; ++ struct list_head dispatch; ++ struct bfq_group *root_group; ++ struct rb_root queue_weights_tree; ++ struct rb_root group_weights_tree; ++ int busy_queues; ++ int wr_busy_queues; ++ int queued; ++ int rq_in_driver; 
++ int max_rq_in_driver; ++ int hw_tag_samples; ++ int hw_tag; ++ int budgets_assigned; ++ struct hrtimer idle_slice_timer; ++ struct bfq_queue *in_service_queue; ++ sector_t last_position; ++ sector_t in_serv_last_pos; ++ u64 last_completion; ++ u64 first_dispatch; ++ u64 last_dispatch; ++ ktime_t last_budget_start; ++ ktime_t last_idling_start; ++ int peak_rate_samples; ++ u32 sequential_samples; ++ u64 tot_sectors_dispatched; ++ u32 last_rq_max_size; ++ u64 delta_from_first; ++ u32 peak_rate; ++ int bfq_max_budget; ++ struct list_head active_list; ++ struct list_head idle_list; ++ u64 bfq_fifo_expire[2]; ++ unsigned int bfq_back_penalty; ++ unsigned int bfq_back_max; ++ u32 bfq_slice_idle; ++ int bfq_user_max_budget; ++ unsigned int bfq_timeout; ++ unsigned int bfq_requests_within_timer; ++ bool strict_guarantees; ++ long unsigned int last_ins_in_burst; ++ long unsigned int bfq_burst_interval; ++ int burst_size; ++ struct bfq_entity *burst_parent_entity; ++ long unsigned int bfq_large_burst_thresh; ++ bool large_burst; ++ struct hlist_head burst_list; ++ bool low_latency; ++ unsigned int bfq_wr_coeff; ++ unsigned int bfq_wr_max_time; ++ unsigned int bfq_wr_rt_max_time; ++ unsigned int bfq_wr_min_idle_time; ++ long unsigned int bfq_wr_min_inter_arr_async; ++ unsigned int bfq_wr_max_softrt_rate; ++ u64 rate_dur_prod; ++ struct bfq_queue oom_bfqq; ++ spinlock_t lock; ++ struct bfq_io_cq *bio_bic; ++ struct bfq_queue *bio_bfqq; ++ unsigned int word_depths[4]; ++}; ++ ++struct bfq_io_cq { ++ struct io_cq icq; ++ struct bfq_queue *bfqq[2]; ++ int ioprio; ++ uint64_t blkcg_serial_nr; ++ bool saved_has_short_ttime; ++ bool saved_IO_bound; ++ bool saved_in_large_burst; ++ bool was_in_burst_list; ++ long unsigned int saved_wr_coeff; ++ long unsigned int saved_last_wr_start_finish; ++ long unsigned int saved_wr_start_at_switch_to_srt; ++ unsigned int saved_wr_cur_max_time; ++ struct bfq_ttime saved_ttime; ++}; ++ ++struct bfqg_stats {}; ++ ++struct bfq_group { ++ struct blkg_policy_data pd; ++ char blkg_path[128]; ++ int ref; ++ struct bfq_entity entity; ++ struct bfq_sched_data sched_data; ++ void *bfqd; ++ struct bfq_queue *async_bfqq[16]; ++ struct bfq_queue *async_idle_bfqq; ++ struct bfq_entity *my_entity; ++ int active_entities; ++ struct rb_root rq_pos_tree; ++ struct bfqg_stats stats; ++}; ++ ++enum bfqq_state_flags { ++ BFQQF_just_created = 0, ++ BFQQF_busy = 1, ++ BFQQF_wait_request = 2, ++ BFQQF_non_blocking_wait_rq = 3, ++ BFQQF_fifo_expire = 4, ++ BFQQF_has_short_ttime = 5, ++ BFQQF_sync = 6, ++ BFQQF_IO_bound = 7, ++ BFQQF_in_large_burst = 8, ++ BFQQF_softrt_update = 9, ++ BFQQF_coop = 10, ++ BFQQF_split_coop = 11, ++}; ++ ++enum bfqq_expiration { ++ BFQQE_TOO_IDLE = 0, ++ BFQQE_BUDGET_TIMEOUT = 1, ++ BFQQE_BUDGET_EXHAUSTED = 2, ++ BFQQE_NO_MORE_REQUESTS = 3, ++ BFQQE_PREEMPTED = 4, ++}; ++ ++struct bfq_group_data { ++ struct blkcg_policy_data pd; ++ unsigned int weight; ++}; ++ ++struct cdrom_msf0 { ++ __u8 minute; ++ __u8 second; ++ __u8 frame; ++}; ++ ++union cdrom_addr { ++ struct cdrom_msf0 msf; ++ int lba; ++}; ++ ++struct cdrom_read_audio { ++ union cdrom_addr addr; ++ __u8 addr_format; ++ int nframes; ++ __u8 *buf; ++}; ++ ++struct compat_hd_geometry { ++ unsigned char heads; ++ unsigned char sectors; ++ short unsigned int cylinders; ++ u32 start; ++}; ++ ++struct compat_cdrom_read_audio { ++ union cdrom_addr addr; ++ u8 addr_format; ++ compat_int_t nframes; ++ compat_caddr_t buf; ++}; ++ ++struct compat_cdrom_generic_command { ++ unsigned char cmd[12]; ++ compat_caddr_t 
buffer; ++ compat_uint_t buflen; ++ compat_int_t stat; ++ compat_caddr_t sense; ++ unsigned char data_direction; ++ compat_int_t quiet; ++ compat_int_t timeout; ++ compat_caddr_t reserved[1]; ++}; ++ ++struct compat_blkpg_ioctl_arg { ++ compat_int_t op; ++ compat_int_t flags; ++ compat_int_t datalen; ++ compat_caddr_t data; ++}; ++ ++enum bip_flags { ++ BIP_BLOCK_INTEGRITY = 1, ++ BIP_MAPPED_INTEGRITY = 2, ++ BIP_CTRL_NOCHECK = 4, ++ BIP_DISK_NOCHECK = 8, ++ BIP_IP_CHECKSUM = 16, ++}; ++ ++enum blk_integrity_flags { ++ BLK_INTEGRITY_VERIFY = 1, ++ BLK_INTEGRITY_GENERATE = 2, ++ BLK_INTEGRITY_DEVICE_CAPABLE = 4, ++ BLK_INTEGRITY_IP_CHECKSUM = 8, ++}; ++ ++struct integrity_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct blk_integrity *, char *); ++ ssize_t (*store)(struct blk_integrity *, const char *, size_t); ++}; ++ ++enum t10_dif_type { ++ T10_PI_TYPE0_PROTECTION = 0, ++ T10_PI_TYPE1_PROTECTION = 1, ++ T10_PI_TYPE2_PROTECTION = 2, ++ T10_PI_TYPE3_PROTECTION = 3, ++}; ++ ++struct t10_pi_tuple { ++ __be16 guard_tag; ++ __be16 app_tag; ++ __be32 ref_tag; ++}; ++ ++typedef __be16 csum_fn(void *, unsigned int); ++ ++struct virtio_device_id { ++ __u32 device; ++ __u32 vendor; ++}; ++ ++typedef __u16 __virtio16; ++ ++typedef __u32 __virtio32; ++ ++typedef __u64 __virtio64; ++ ++struct vring_desc { ++ __virtio64 addr; ++ __virtio32 len; ++ __virtio16 flags; ++ __virtio16 next; ++}; ++ ++struct vring_avail { ++ __virtio16 flags; ++ __virtio16 idx; ++ __virtio16 ring[0]; ++}; ++ ++struct vring_used_elem { ++ __virtio32 id; ++ __virtio32 len; ++}; ++ ++struct vring_used { ++ __virtio16 flags; ++ __virtio16 idx; ++ struct vring_used_elem ring[0]; ++}; ++ ++struct vring { ++ unsigned int num; ++ struct vring_desc *desc; ++ struct vring_avail *avail; ++ struct vring_used *used; ++}; ++ ++struct vringh { ++ bool little_endian; ++ bool event_indices; ++ bool weak_barriers; ++ u16 last_avail_idx; ++ u16 last_used_idx; ++ u32 completed; ++ struct vring vring; ++ void (*notify)(struct vringh *); ++}; ++ ++struct virtio_device; ++ ++typedef void vrh_callback_t(struct virtio_device *, struct vringh *); ++ ++struct virtio_config_ops; ++ ++struct vringh_config_ops; ++ ++struct virtio_device { ++ int index; ++ bool failed; ++ bool config_enabled; ++ bool config_change_pending; ++ spinlock_t config_lock; ++ struct device dev; ++ struct virtio_device_id id; ++ const struct virtio_config_ops *config; ++ const struct vringh_config_ops *vringh_config; ++ struct list_head vqs; ++ u64 features; ++ void *priv; ++}; ++ ++struct vringh_config_ops { ++ int (*find_vrhs)(struct virtio_device *, unsigned int, struct vringh **, vrh_callback_t **); ++ void (*del_vrhs)(struct virtio_device *); ++}; ++ ++struct virtqueue { ++ struct list_head list; ++ void (*callback)(struct virtqueue *); ++ const char *name; ++ struct virtio_device *vdev; ++ unsigned int index; ++ unsigned int num_free; ++ void *priv; ++}; ++ ++typedef void vq_callback_t(struct virtqueue *); ++ ++struct irq_affinity___2; ++ ++struct virtio_config_ops { ++ void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); ++ void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); ++ u32 (*generation)(struct virtio_device *); ++ u8 (*get_status)(struct virtio_device *); ++ void (*set_status)(struct virtio_device *, u8); ++ void (*reset)(struct virtio_device *); ++ int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity___2 
*); ++ void (*del_vqs)(struct virtio_device *); ++ u64 (*get_features)(struct virtio_device *); ++ int (*finalize_features)(struct virtio_device *); ++ const char * (*bus_name)(struct virtio_device *); ++ int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); ++ const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); ++}; ++ ++struct irq_poll; ++ ++typedef int irq_poll_fn(struct irq_poll *, int); ++ ++struct irq_poll { ++ struct list_head list; ++ long unsigned int state; ++ int weight; ++ irq_poll_fn *poll; ++}; ++ ++enum rdma_restrack_type { ++ RDMA_RESTRACK_PD = 0, ++ RDMA_RESTRACK_CQ = 1, ++ RDMA_RESTRACK_QP = 2, ++ RDMA_RESTRACK_CM_ID = 3, ++ RDMA_RESTRACK_MR = 4, ++ RDMA_RESTRACK_MAX = 5, ++}; ++ ++struct rdma_restrack_entry; ++ ++struct rdma_restrack_root { ++ struct rw_semaphore rwsem; ++ struct hlist_head hash[256]; ++ int (*fill_res_entry)(struct sk_buff *, struct rdma_restrack_entry *); ++}; ++ ++struct rdma_restrack_entry { ++ bool valid; ++ struct kref kref; ++ struct completion comp; ++ struct task_struct *task; ++ const char *kern_name; ++ struct hlist_node node; ++ enum rdma_restrack_type type; ++}; ++ ++enum rdma_driver_id { ++ RDMA_DRIVER_UNKNOWN = 0, ++ RDMA_DRIVER_MLX5 = 1, ++ RDMA_DRIVER_MLX4 = 2, ++ RDMA_DRIVER_CXGB3 = 3, ++ RDMA_DRIVER_CXGB4 = 4, ++ RDMA_DRIVER_MTHCA = 5, ++ RDMA_DRIVER_BNXT_RE = 6, ++ RDMA_DRIVER_OCRDMA = 7, ++ RDMA_DRIVER_NES = 8, ++ RDMA_DRIVER_I40IW = 9, ++ RDMA_DRIVER_VMW_PVRDMA = 10, ++ RDMA_DRIVER_QEDR = 11, ++ RDMA_DRIVER_HNS = 12, ++ RDMA_DRIVER_USNIC = 13, ++ RDMA_DRIVER_RXE = 14, ++ RDMA_DRIVER_HFI1 = 15, ++ RDMA_DRIVER_QIB = 16, ++}; ++ ++enum ib_uverbs_flow_action_esp_keymat { ++ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM = 0, ++}; ++ ++struct ib_uverbs_flow_action_esp_keymat_aes_gcm { ++ __u64 iv; ++ __u32 iv_algo; ++ __u32 salt; ++ __u32 icv_len; ++ __u32 key_len; ++ __u32 aes_key[8]; ++}; ++ ++enum ib_uverbs_flow_action_esp_replay { ++ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE = 0, ++ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP = 1, ++}; ++ ++struct ib_uverbs_flow_action_esp_replay_bmp { ++ __u32 size; ++}; ++ ++enum ib_gid_type { ++ IB_GID_TYPE_IB = 0, ++ IB_GID_TYPE_ROCE = 0, ++ IB_GID_TYPE_ROCE_UDP_ENCAP = 1, ++ IB_GID_TYPE_SIZE = 2, ++}; ++ ++struct ib_device; ++ ++struct ib_gid_attr { ++ struct net_device *ndev; ++ struct ib_device *device; ++ union ib_gid gid; ++ enum ib_gid_type gid_type; ++ u16 index; ++ u8 port_num; ++}; ++ ++struct ib_event; ++ ++struct ib_event_handler { ++ struct ib_device *device; ++ void (*handler)(struct ib_event_handler *, struct ib_event *); ++ struct list_head list; ++}; ++ ++struct ib_port_cache; ++ ++struct ib_cache { ++ rwlock_t lock; ++ struct ib_event_handler event_handler; ++ struct ib_port_cache *ports; ++}; ++ ++struct iw_cm_verbs; ++ ++enum rdma_link_layer { ++ IB_LINK_LAYER_UNSPECIFIED = 0, ++ IB_LINK_LAYER_INFINIBAND = 1, ++ IB_LINK_LAYER_ETHERNET = 2, ++}; ++ ++enum ib_srq_attr_mask { ++ IB_SRQ_MAX_WR = 1, ++ IB_SRQ_LIMIT = 2, ++}; ++ ++enum ib_cq_notify_flags { ++ IB_CQ_SOLICITED = 1, ++ IB_CQ_NEXT_COMP = 2, ++ IB_CQ_SOLICITED_MASK = 3, ++ IB_CQ_REPORT_MISSED_EVENTS = 4, ++}; ++ ++enum ib_mr_type { ++ IB_MR_TYPE_MEM_REG = 0, ++ IB_MR_TYPE_SIGNATURE = 1, ++ IB_MR_TYPE_SG_GAPS = 2, ++}; ++ ++enum ib_mw_type { ++ IB_MW_TYPE_1 = 1, ++ IB_MW_TYPE_2 = 2, ++}; ++ ++struct ib_mad_hdr; ++ ++struct uverbs_attr_bundle; ++ ++enum rdma_netdev_t { ++ RDMA_NETDEV_OPA_VNIC = 0, ++ RDMA_NETDEV_IPOIB = 1, ++}; ++ ++enum ib_atomic_cap { ++ IB_ATOMIC_NONE = 0, ++ IB_ATOMIC_HCA = 1, ++ 
IB_ATOMIC_GLOB = 2, ++}; ++ ++struct ib_odp_caps { ++ uint64_t general_caps; ++ struct { ++ uint32_t rc_odp_caps; ++ uint32_t uc_odp_caps; ++ uint32_t ud_odp_caps; ++ } per_transport_caps; ++}; ++ ++struct ib_rss_caps { ++ u32 supported_qpts; ++ u32 max_rwq_indirection_tables; ++ u32 max_rwq_indirection_table_size; ++}; ++ ++struct ib_tm_caps { ++ u32 max_rndv_hdr_size; ++ u32 max_num_tags; ++ u32 flags; ++ u32 max_ops; ++ u32 max_sge; ++}; ++ ++struct ib_cq_caps { ++ u16 max_cq_moderation_count; ++ u16 max_cq_moderation_period; ++}; ++ ++struct ib_device_attr { ++ u64 fw_ver; ++ __be64 sys_image_guid; ++ u64 max_mr_size; ++ u64 page_size_cap; ++ u32 vendor_id; ++ u32 vendor_part_id; ++ u32 hw_ver; ++ int max_qp; ++ int max_qp_wr; ++ u64 device_cap_flags; ++ int max_send_sge; ++ int max_recv_sge; ++ int max_sge_rd; ++ int max_cq; ++ int max_cqe; ++ int max_mr; ++ int max_pd; ++ int max_qp_rd_atom; ++ int max_ee_rd_atom; ++ int max_res_rd_atom; ++ int max_qp_init_rd_atom; ++ int max_ee_init_rd_atom; ++ enum ib_atomic_cap atomic_cap; ++ enum ib_atomic_cap masked_atomic_cap; ++ int max_ee; ++ int max_rdd; ++ int max_mw; ++ int max_raw_ipv6_qp; ++ int max_raw_ethy_qp; ++ int max_mcast_grp; ++ int max_mcast_qp_attach; ++ int max_total_mcast_qp_attach; ++ int max_ah; ++ int max_fmr; ++ int max_map_per_fmr; ++ int max_srq; ++ int max_srq_wr; ++ int max_srq_sge; ++ unsigned int max_fast_reg_page_list_len; ++ u16 max_pkeys; ++ u8 local_ca_ack_delay; ++ int sig_prot_cap; ++ int sig_guard_cap; ++ struct ib_odp_caps odp_caps; ++ uint64_t timestamp_mask; ++ uint64_t hca_core_clock; ++ struct ib_rss_caps rss_caps; ++ u32 max_wq_type_rq; ++ u32 raw_packet_caps; ++ struct ib_tm_caps tm_caps; ++ struct ib_cq_caps cq_caps; ++ u64 max_dm_size; ++}; ++ ++struct uverbs_object_tree_def; ++ ++struct ib_port_immutable; ++ ++struct ib_port_pkey_list; ++ ++struct rdma_hw_stats; ++ ++struct ib_udata; ++ ++struct ib_port_attr; ++ ++struct ib_device_modify; ++ ++struct ib_port_modify; ++ ++struct ib_ucontext; ++ ++struct ib_pd; ++ ++struct ib_ah; ++ ++struct rdma_ah_attr; ++ ++struct ib_srq; ++ ++struct ib_srq_init_attr; ++ ++struct ib_srq_attr; ++ ++struct ib_recv_wr; ++ ++struct ib_qp; ++ ++struct ib_qp_init_attr; ++ ++struct ib_qp_attr; ++ ++struct ib_send_wr; ++ ++struct ib_cq; ++ ++struct ib_cq_init_attr; ++ ++struct ib_wc; ++ ++struct ib_mr; ++ ++struct ib_mw; ++ ++struct ib_fmr; ++ ++struct ib_fmr_attr; ++ ++struct ib_grh; ++ ++struct ib_xrcd; ++ ++struct ib_flow; ++ ++struct ib_flow_attr; ++ ++struct ib_mr_status; ++ ++struct ib_wq; ++ ++struct ib_wq_init_attr; ++ ++struct ib_wq_attr; ++ ++struct ib_rwq_ind_table; ++ ++struct ib_rwq_ind_table_init_attr; ++ ++struct ib_flow_action; ++ ++struct ib_flow_action_attrs_esp; ++ ++struct ib_dm; ++ ++struct ib_dm_alloc_attr; ++ ++struct ib_dm_mr_attr; ++ ++struct ib_counters; ++ ++struct ib_counters_read_attr; ++ ++struct ib_device { ++ struct device *dma_device; ++ char name[64]; ++ struct list_head event_handler_list; ++ spinlock_t event_handler_lock; ++ spinlock_t client_data_lock; ++ struct list_head core_list; ++ struct list_head client_data_list; ++ struct ib_cache cache; ++ struct ib_port_immutable *port_immutable; ++ int num_comp_vectors; ++ struct ib_port_pkey_list *port_pkey_list; ++ struct iw_cm_verbs *iwcm; ++ struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8); ++ int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8, int); ++ int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); ++ int 
(*query_port)(struct ib_device *, u8, struct ib_port_attr *); ++ enum rdma_link_layer (*get_link_layer)(struct ib_device *, u8); ++ struct net_device * (*get_netdev)(struct ib_device *, u8); ++ int (*query_gid)(struct ib_device *, u8, int, union ib_gid *); ++ int (*add_gid)(const struct ib_gid_attr *, void **); ++ int (*del_gid)(const struct ib_gid_attr *, void **); ++ int (*query_pkey)(struct ib_device *, u8, u16, u16 *); ++ int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); ++ int (*modify_port)(struct ib_device *, u8, int, struct ib_port_modify *); ++ struct ib_ucontext * (*alloc_ucontext)(struct ib_device *, struct ib_udata *); ++ int (*dealloc_ucontext)(struct ib_ucontext *); ++ int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); ++ struct ib_pd * (*alloc_pd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); ++ int (*dealloc_pd)(struct ib_pd *); ++ struct ib_ah * (*create_ah)(struct ib_pd *, struct rdma_ah_attr *, struct ib_udata *); ++ int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *); ++ int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *); ++ int (*destroy_ah)(struct ib_ah *); ++ struct ib_srq * (*create_srq)(struct ib_pd *, struct ib_srq_init_attr *, struct ib_udata *); ++ int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); ++ int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); ++ int (*destroy_srq)(struct ib_srq *); ++ int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **); ++ struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *); ++ int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); ++ int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); ++ int (*destroy_qp)(struct ib_qp *); ++ int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **); ++ int (*post_recv)(struct ib_qp *, const struct ib_recv_wr *, const struct ib_recv_wr **); ++ struct ib_cq * (*create_cq)(struct ib_device *, const struct ib_cq_init_attr *, struct ib_ucontext *, struct ib_udata *); ++ int (*modify_cq)(struct ib_cq *, u16, u16); ++ int (*destroy_cq)(struct ib_cq *); ++ int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); ++ int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); ++ int (*peek_cq)(struct ib_cq *, int); ++ int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags); ++ int (*req_ncomp_notif)(struct ib_cq *, int); ++ struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); ++ struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *); ++ int (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *); ++ int (*dereg_mr)(struct ib_mr *); ++ struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type, u32); ++ int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); ++ struct ib_mw * (*alloc_mw)(struct ib_pd *, enum ib_mw_type, struct ib_udata *); ++ int (*dealloc_mw)(struct ib_mw *); ++ struct ib_fmr * (*alloc_fmr)(struct ib_pd *, int, struct ib_fmr_attr *); ++ int (*map_phys_fmr)(struct ib_fmr *, u64 *, int, u64); ++ int (*unmap_fmr)(struct list_head *); ++ int (*dealloc_fmr)(struct ib_fmr *); ++ int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16); ++ int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16); ++ int (*process_mad)(struct ib_device *, int, u8, const struct ib_wc *, const struct ib_grh *, const struct ib_mad_hdr *, size_t, struct ib_mad_hdr *, 
size_t *, u16 *); ++ struct ib_xrcd * (*alloc_xrcd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); ++ int (*dealloc_xrcd)(struct ib_xrcd *); ++ struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, int, struct ib_udata *); ++ int (*destroy_flow)(struct ib_flow *); ++ int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *); ++ void (*disassociate_ucontext)(struct ib_ucontext *); ++ void (*drain_rq)(struct ib_qp *); ++ void (*drain_sq)(struct ib_qp *); ++ int (*set_vf_link_state)(struct ib_device *, int, u8, int); ++ int (*get_vf_config)(struct ib_device *, int, u8, struct ifla_vf_info *); ++ int (*get_vf_stats)(struct ib_device *, int, u8, struct ifla_vf_stats *); ++ int (*set_vf_guid)(struct ib_device *, int, u8, u64, int); ++ struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); ++ int (*destroy_wq)(struct ib_wq *); ++ int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *); ++ struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); ++ int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); ++ struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); ++ int (*destroy_flow_action)(struct ib_flow_action *); ++ int (*modify_flow_action_esp)(struct ib_flow_action *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); ++ struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *); ++ int (*dealloc_dm)(struct ib_dm *); ++ struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *); ++ struct ib_counters * (*create_counters)(struct ib_device *, struct uverbs_attr_bundle *); ++ int (*destroy_counters)(struct ib_counters *); ++ int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *); ++ struct net_device * (*alloc_rdma_netdev)(struct ib_device *, u8, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *)); ++ struct module *owner; ++ struct device dev; ++ struct kobject *ports_parent; ++ struct list_head port_list; ++ enum { ++ IB_DEV_UNINITIALIZED = 0, ++ IB_DEV_REGISTERED = 1, ++ IB_DEV_UNREGISTERED = 2, ++ } reg_state; ++ int uverbs_abi_ver; ++ u64 uverbs_cmd_mask; ++ u64 uverbs_ex_cmd_mask; ++ char node_desc[64]; ++ __be64 node_guid; ++ u32 local_dma_lkey; ++ u16 is_switch: 1; ++ u8 node_type; ++ u8 phys_port_cnt; ++ struct ib_device_attr attrs; ++ struct attribute_group *hw_stats_ag; ++ struct rdma_hw_stats *hw_stats; ++ struct rdmacg_device cg_device; ++ u32 index; ++ struct rdma_restrack_root res; ++ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); ++ void (*get_dev_fw_str)(struct ib_device *, char *); ++ const struct cpumask * (*get_vector_affinity)(struct ib_device *, int); ++ const struct uverbs_object_tree_def * const *driver_specs; ++ enum rdma_driver_id driver_id; ++ refcount_t refcount; ++ struct completion unreg_completion; ++}; ++ ++struct ib_cq_init_attr { ++ unsigned int cqe; ++ u32 comp_vector; ++ u32 flags; ++}; ++ ++struct ib_dm_mr_attr { ++ u64 length; ++ u64 offset; ++ u32 access_flags; ++}; ++ ++struct ib_dm_alloc_attr { ++ u64 length; ++ u32 alignment; ++ u32 flags; ++}; ++ ++enum ib_mtu { ++ IB_MTU_256 = 1, ++ IB_MTU_512 = 2, ++ IB_MTU_1024 = 3, ++ IB_MTU_2048 = 4, ++ IB_MTU_4096 = 5, 
++}; ++ ++enum ib_port_state { ++ IB_PORT_NOP = 0, ++ IB_PORT_DOWN = 1, ++ IB_PORT_INIT = 2, ++ IB_PORT_ARMED = 3, ++ IB_PORT_ACTIVE = 4, ++ IB_PORT_ACTIVE_DEFER = 5, ++}; ++ ++struct rdma_hw_stats { ++ struct mutex lock; ++ long unsigned int timestamp; ++ long unsigned int lifespan; ++ const char * const *names; ++ int num_counters; ++ u64 value[0]; ++}; ++ ++struct ib_port_attr { ++ u64 subnet_prefix; ++ enum ib_port_state state; ++ enum ib_mtu max_mtu; ++ enum ib_mtu active_mtu; ++ int gid_tbl_len; ++ unsigned int ip_gids: 1; ++ u32 port_cap_flags; ++ u32 max_msg_sz; ++ u32 bad_pkey_cntr; ++ u32 qkey_viol_cntr; ++ u16 pkey_tbl_len; ++ u32 sm_lid; ++ u32 lid; ++ u8 lmc; ++ u8 max_vl_num; ++ u8 sm_sl; ++ u8 subnet_timeout; ++ u8 init_type_reply; ++ u8 active_width; ++ u8 active_speed; ++ u8 phys_state; ++}; ++ ++struct ib_device_modify { ++ u64 sys_image_guid; ++ char node_desc[64]; ++}; ++ ++struct ib_port_modify { ++ u32 set_port_cap_mask; ++ u32 clr_port_cap_mask; ++ u8 init_type; ++}; ++ ++enum ib_event_type { ++ IB_EVENT_CQ_ERR = 0, ++ IB_EVENT_QP_FATAL = 1, ++ IB_EVENT_QP_REQ_ERR = 2, ++ IB_EVENT_QP_ACCESS_ERR = 3, ++ IB_EVENT_COMM_EST = 4, ++ IB_EVENT_SQ_DRAINED = 5, ++ IB_EVENT_PATH_MIG = 6, ++ IB_EVENT_PATH_MIG_ERR = 7, ++ IB_EVENT_DEVICE_FATAL = 8, ++ IB_EVENT_PORT_ACTIVE = 9, ++ IB_EVENT_PORT_ERR = 10, ++ IB_EVENT_LID_CHANGE = 11, ++ IB_EVENT_PKEY_CHANGE = 12, ++ IB_EVENT_SM_CHANGE = 13, ++ IB_EVENT_SRQ_ERR = 14, ++ IB_EVENT_SRQ_LIMIT_REACHED = 15, ++ IB_EVENT_QP_LAST_WQE_REACHED = 16, ++ IB_EVENT_CLIENT_REREGISTER = 17, ++ IB_EVENT_GID_CHANGE = 18, ++ IB_EVENT_WQ_FATAL = 19, ++}; ++ ++typedef void (*ib_comp_handler)(struct ib_cq *, void *); ++ ++enum ib_poll_context { ++ IB_POLL_DIRECT = 0, ++ IB_POLL_SOFTIRQ = 1, ++ IB_POLL_WORKQUEUE = 2, ++ IB_POLL_UNBOUND_WORKQUEUE = 3, ++}; ++ ++struct ib_uobject; ++ ++struct ib_cq { ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ ib_comp_handler comp_handler; ++ void (*event_handler)(struct ib_event *, void *); ++ void *cq_context; ++ int cqe; ++ atomic_t usecnt; ++ enum ib_poll_context poll_ctx; ++ struct ib_wc *wc; ++ union { ++ struct irq_poll iop; ++ struct work_struct work; ++ }; ++ struct workqueue_struct *comp_wq; ++ struct rdma_restrack_entry res; ++}; ++ ++enum ib_qp_type { ++ IB_QPT_SMI = 0, ++ IB_QPT_GSI = 1, ++ IB_QPT_RC = 2, ++ IB_QPT_UC = 3, ++ IB_QPT_UD = 4, ++ IB_QPT_RAW_IPV6 = 5, ++ IB_QPT_RAW_ETHERTYPE = 6, ++ IB_QPT_RAW_PACKET = 8, ++ IB_QPT_XRC_INI = 9, ++ IB_QPT_XRC_TGT = 10, ++ IB_QPT_MAX = 11, ++ IB_QPT_DRIVER = 255, ++ IB_QPT_RESERVED1 = 4096, ++ IB_QPT_RESERVED2 = 4097, ++ IB_QPT_RESERVED3 = 4098, ++ IB_QPT_RESERVED4 = 4099, ++ IB_QPT_RESERVED5 = 4100, ++ IB_QPT_RESERVED6 = 4101, ++ IB_QPT_RESERVED7 = 4102, ++ IB_QPT_RESERVED8 = 4103, ++ IB_QPT_RESERVED9 = 4104, ++ IB_QPT_RESERVED10 = 4105, ++}; ++ ++struct ib_qp_security; ++ ++struct ib_qp { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ struct ib_cq *send_cq; ++ struct ib_cq *recv_cq; ++ spinlock_t mr_lock; ++ int mrs_used; ++ struct list_head rdma_mrs; ++ struct list_head sig_mrs; ++ struct ib_srq *srq; ++ struct ib_xrcd *xrcd; ++ struct list_head xrcd_list; ++ atomic_t usecnt; ++ struct list_head open_list; ++ struct ib_qp *real_qp; ++ struct ib_uobject *uobject; ++ void (*event_handler)(struct ib_event *, void *); ++ void *qp_context; ++ const struct ib_gid_attr *av_sgid_attr; ++ const struct ib_gid_attr *alt_path_sgid_attr; ++ u32 qp_num; ++ u32 max_write_sge; ++ u32 max_read_sge; ++ enum ib_qp_type qp_type; ++ struct ib_rwq_ind_table 
*rwq_ind_tbl; ++ struct ib_qp_security *qp_sec; ++ u8 port; ++ struct rdma_restrack_entry res; ++}; ++ ++enum ib_srq_type { ++ IB_SRQT_BASIC = 0, ++ IB_SRQT_XRC = 1, ++ IB_SRQT_TM = 2, ++}; ++ ++struct ib_srq { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ struct ib_uobject *uobject; ++ void (*event_handler)(struct ib_event *, void *); ++ void *srq_context; ++ enum ib_srq_type srq_type; ++ atomic_t usecnt; ++ struct { ++ struct ib_cq *cq; ++ union { ++ struct { ++ struct ib_xrcd *xrcd; ++ u32 srq_num; ++ } xrc; ++ }; ++ } ext; ++}; ++ ++enum ib_wq_state { ++ IB_WQS_RESET = 0, ++ IB_WQS_RDY = 1, ++ IB_WQS_ERR = 2, ++}; ++ ++enum ib_wq_type { ++ IB_WQT_RQ = 0, ++}; ++ ++struct ib_wq { ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ void *wq_context; ++ void (*event_handler)(struct ib_event *, void *); ++ struct ib_pd *pd; ++ struct ib_cq *cq; ++ u32 wq_num; ++ enum ib_wq_state state; ++ enum ib_wq_type wq_type; ++ atomic_t usecnt; ++}; ++ ++struct ib_event { ++ struct ib_device *device; ++ union { ++ struct ib_cq *cq; ++ struct ib_qp *qp; ++ struct ib_srq *srq; ++ struct ib_wq *wq; ++ u8 port_num; ++ } element; ++ enum ib_event_type event; ++}; ++ ++struct ib_global_route { ++ const struct ib_gid_attr *sgid_attr; ++ union ib_gid dgid; ++ u32 flow_label; ++ u8 sgid_index; ++ u8 hop_limit; ++ u8 traffic_class; ++}; ++ ++struct ib_grh { ++ __be32 version_tclass_flow; ++ __be16 paylen; ++ u8 next_hdr; ++ u8 hop_limit; ++ union ib_gid sgid; ++ union ib_gid dgid; ++}; ++ ++enum ib_sig_err_type { ++ IB_SIG_BAD_GUARD = 0, ++ IB_SIG_BAD_REFTAG = 1, ++ IB_SIG_BAD_APPTAG = 2, ++}; ++ ++struct ib_sig_err { ++ enum ib_sig_err_type err_type; ++ u32 expected; ++ u32 actual; ++ u64 sig_err_offset; ++ u32 key; ++}; ++ ++struct ib_mr_status { ++ u32 fail_status; ++ struct ib_sig_err sig_err; ++}; ++ ++enum rdma_ah_attr_type { ++ RDMA_AH_ATTR_TYPE_UNDEFINED = 0, ++ RDMA_AH_ATTR_TYPE_IB = 1, ++ RDMA_AH_ATTR_TYPE_ROCE = 2, ++ RDMA_AH_ATTR_TYPE_OPA = 3, ++}; ++ ++struct ib_ah_attr { ++ u16 dlid; ++ u8 src_path_bits; ++}; ++ ++struct roce_ah_attr { ++ u8 dmac[6]; ++}; ++ ++struct opa_ah_attr { ++ u32 dlid; ++ u8 src_path_bits; ++ bool make_grd; ++}; ++ ++struct rdma_ah_attr { ++ struct ib_global_route grh; ++ u8 sl; ++ u8 static_rate; ++ u8 port_num; ++ u8 ah_flags; ++ enum rdma_ah_attr_type type; ++ union { ++ struct ib_ah_attr ib; ++ struct roce_ah_attr roce; ++ struct opa_ah_attr opa; ++ }; ++}; ++ ++enum ib_wc_status { ++ IB_WC_SUCCESS = 0, ++ IB_WC_LOC_LEN_ERR = 1, ++ IB_WC_LOC_QP_OP_ERR = 2, ++ IB_WC_LOC_EEC_OP_ERR = 3, ++ IB_WC_LOC_PROT_ERR = 4, ++ IB_WC_WR_FLUSH_ERR = 5, ++ IB_WC_MW_BIND_ERR = 6, ++ IB_WC_BAD_RESP_ERR = 7, ++ IB_WC_LOC_ACCESS_ERR = 8, ++ IB_WC_REM_INV_REQ_ERR = 9, ++ IB_WC_REM_ACCESS_ERR = 10, ++ IB_WC_REM_OP_ERR = 11, ++ IB_WC_RETRY_EXC_ERR = 12, ++ IB_WC_RNR_RETRY_EXC_ERR = 13, ++ IB_WC_LOC_RDD_VIOL_ERR = 14, ++ IB_WC_REM_INV_RD_REQ_ERR = 15, ++ IB_WC_REM_ABORT_ERR = 16, ++ IB_WC_INV_EECN_ERR = 17, ++ IB_WC_INV_EEC_STATE_ERR = 18, ++ IB_WC_FATAL_ERR = 19, ++ IB_WC_RESP_TIMEOUT_ERR = 20, ++ IB_WC_GENERAL_ERR = 21, ++}; ++ ++enum ib_wc_opcode { ++ IB_WC_SEND = 0, ++ IB_WC_RDMA_WRITE = 1, ++ IB_WC_RDMA_READ = 2, ++ IB_WC_COMP_SWAP = 3, ++ IB_WC_FETCH_ADD = 4, ++ IB_WC_LSO = 5, ++ IB_WC_LOCAL_INV = 6, ++ IB_WC_REG_MR = 7, ++ IB_WC_MASKED_COMP_SWAP = 8, ++ IB_WC_MASKED_FETCH_ADD = 9, ++ IB_WC_RECV = 128, ++ IB_WC_RECV_RDMA_WITH_IMM = 129, ++}; ++ ++struct ib_cqe { ++ void (*done)(struct ib_cq *, struct ib_wc *); ++}; ++ ++struct ib_wc { ++ union { ++ u64 wr_id; ++ struct 
ib_cqe *wr_cqe; ++ }; ++ enum ib_wc_status status; ++ enum ib_wc_opcode opcode; ++ u32 vendor_err; ++ u32 byte_len; ++ struct ib_qp *qp; ++ union { ++ __be32 imm_data; ++ u32 invalidate_rkey; ++ } ex; ++ u32 src_qp; ++ u32 slid; ++ int wc_flags; ++ u16 pkey_index; ++ u8 sl; ++ u8 dlid_path_bits; ++ u8 port_num; ++ u8 smac[6]; ++ u16 vlan_id; ++ u8 network_hdr_type; ++}; ++ ++struct ib_srq_attr { ++ u32 max_wr; ++ u32 max_sge; ++ u32 srq_limit; ++}; ++ ++struct ib_xrcd { ++ struct ib_device *device; ++ atomic_t usecnt; ++ struct inode *inode; ++ struct mutex tgt_qp_mutex; ++ struct list_head tgt_qp_list; ++}; ++ ++struct ib_srq_init_attr { ++ void (*event_handler)(struct ib_event *, void *); ++ void *srq_context; ++ struct ib_srq_attr attr; ++ enum ib_srq_type srq_type; ++ struct { ++ struct ib_cq *cq; ++ union { ++ struct { ++ struct ib_xrcd *xrcd; ++ } xrc; ++ struct { ++ u32 max_num_tags; ++ } tag_matching; ++ }; ++ } ext; ++}; ++ ++struct ib_qp_cap { ++ u32 max_send_wr; ++ u32 max_recv_wr; ++ u32 max_send_sge; ++ u32 max_recv_sge; ++ u32 max_inline_data; ++ u32 max_rdma_ctxs; ++}; ++ ++enum ib_sig_type { ++ IB_SIGNAL_ALL_WR = 0, ++ IB_SIGNAL_REQ_WR = 1, ++}; ++ ++struct ib_qp_init_attr { ++ void (*event_handler)(struct ib_event *, void *); ++ void *qp_context; ++ struct ib_cq *send_cq; ++ struct ib_cq *recv_cq; ++ struct ib_srq *srq; ++ struct ib_xrcd *xrcd; ++ struct ib_qp_cap cap; ++ enum ib_sig_type sq_sig_type; ++ enum ib_qp_type qp_type; ++ u32 create_flags; ++ u8 port_num; ++ struct ib_rwq_ind_table *rwq_ind_tbl; ++ u32 source_qpn; ++}; ++ ++struct ib_rwq_ind_table { ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ atomic_t usecnt; ++ u32 ind_tbl_num; ++ u32 log_ind_tbl_size; ++ struct ib_wq **ind_tbl; ++}; ++ ++enum ib_qp_state { ++ IB_QPS_RESET = 0, ++ IB_QPS_INIT = 1, ++ IB_QPS_RTR = 2, ++ IB_QPS_RTS = 3, ++ IB_QPS_SQD = 4, ++ IB_QPS_SQE = 5, ++ IB_QPS_ERR = 6, ++}; ++ ++enum ib_mig_state { ++ IB_MIG_MIGRATED = 0, ++ IB_MIG_REARM = 1, ++ IB_MIG_ARMED = 2, ++}; ++ ++struct ib_qp_attr { ++ enum ib_qp_state qp_state; ++ enum ib_qp_state cur_qp_state; ++ enum ib_mtu path_mtu; ++ enum ib_mig_state path_mig_state; ++ u32 qkey; ++ u32 rq_psn; ++ u32 sq_psn; ++ u32 dest_qp_num; ++ int qp_access_flags; ++ struct ib_qp_cap cap; ++ struct rdma_ah_attr ah_attr; ++ struct rdma_ah_attr alt_ah_attr; ++ u16 pkey_index; ++ u16 alt_pkey_index; ++ u8 en_sqd_async_notify; ++ u8 sq_draining; ++ u8 max_rd_atomic; ++ u8 max_dest_rd_atomic; ++ u8 min_rnr_timer; ++ u8 port_num; ++ u8 timeout; ++ u8 retry_cnt; ++ u8 rnr_retry; ++ u8 alt_port_num; ++ u8 alt_timeout; ++ u32 rate_limit; ++}; ++ ++enum ib_wr_opcode { ++ IB_WR_RDMA_WRITE = 0, ++ IB_WR_RDMA_WRITE_WITH_IMM = 1, ++ IB_WR_SEND = 2, ++ IB_WR_SEND_WITH_IMM = 3, ++ IB_WR_RDMA_READ = 4, ++ IB_WR_ATOMIC_CMP_AND_SWP = 5, ++ IB_WR_ATOMIC_FETCH_AND_ADD = 6, ++ IB_WR_LSO = 10, ++ IB_WR_SEND_WITH_INV = 9, ++ IB_WR_RDMA_READ_WITH_INV = 11, ++ IB_WR_LOCAL_INV = 7, ++ IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, ++ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, ++ IB_WR_REG_MR = 32, ++ IB_WR_REG_SIG_MR = 33, ++ IB_WR_RESERVED1 = 240, ++ IB_WR_RESERVED2 = 241, ++ IB_WR_RESERVED3 = 242, ++ IB_WR_RESERVED4 = 243, ++ IB_WR_RESERVED5 = 244, ++ IB_WR_RESERVED6 = 245, ++ IB_WR_RESERVED7 = 246, ++ IB_WR_RESERVED8 = 247, ++ IB_WR_RESERVED9 = 248, ++ IB_WR_RESERVED10 = 249, ++}; ++ ++struct ib_sge { ++ u64 addr; ++ u32 length; ++ u32 lkey; ++}; ++ ++struct ib_send_wr { ++ struct ib_send_wr *next; ++ union { ++ u64 wr_id; ++ struct ib_cqe *wr_cqe; ++ }; ++ struct 
ib_sge *sg_list; ++ int num_sge; ++ enum ib_wr_opcode opcode; ++ int send_flags; ++ union { ++ __be32 imm_data; ++ u32 invalidate_rkey; ++ } ex; ++}; ++ ++struct ib_ah { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ struct ib_uobject *uobject; ++ const struct ib_gid_attr *sgid_attr; ++ enum rdma_ah_attr_type type; ++}; ++ ++struct ib_mr { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ u32 lkey; ++ u32 rkey; ++ u64 iova; ++ u64 length; ++ unsigned int page_size; ++ bool need_inval; ++ union { ++ struct ib_uobject *uobject; ++ struct list_head qp_entry; ++ }; ++ struct ib_dm *dm; ++ struct rdma_restrack_entry res; ++}; ++ ++struct ib_recv_wr { ++ struct ib_recv_wr *next; ++ union { ++ u64 wr_id; ++ struct ib_cqe *wr_cqe; ++ }; ++ struct ib_sge *sg_list; ++ int num_sge; ++}; ++ ++struct ib_fmr_attr { ++ int max_pages; ++ int max_maps; ++ u8 page_shift; ++}; ++ ++struct ib_rdmacg_object { ++ struct rdma_cgroup *cg; ++}; ++ ++struct ib_uverbs_file; ++ ++struct ib_umem; ++ ++struct ib_ucontext { ++ struct ib_device *device; ++ struct ib_uverbs_file *ufile; ++ int closing; ++ bool cleanup_retryable; ++ struct pid *tgid; ++ struct rb_root_cached umem_tree; ++ struct rw_semaphore umem_rwsem; ++ void (*invalidate_range)(struct ib_umem *, long unsigned int, long unsigned int); ++ struct mmu_notifier mn; ++ atomic_t notifier_count; ++ struct list_head no_private_counters; ++ int odp_mrs_count; ++ struct ib_rdmacg_object cg_obj; ++}; ++ ++struct uverbs_api_object; ++ ++struct ib_uobject { ++ u64 user_handle; ++ struct ib_uverbs_file *ufile; ++ struct ib_ucontext *context; ++ void *object; ++ struct list_head list; ++ struct ib_rdmacg_object cg_obj; ++ int id; ++ struct kref ref; ++ atomic_t usecnt; ++ struct callback_head rcu; ++ const struct uverbs_api_object *uapi_object; ++}; ++ ++struct ib_udata { ++ const void *inbuf; ++ void *outbuf; ++ size_t inlen; ++ size_t outlen; ++}; ++ ++struct ib_pd { ++ u32 local_dma_lkey; ++ u32 flags; ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ atomic_t usecnt; ++ u32 unsafe_global_rkey; ++ struct ib_mr *__internal_mr; ++ struct rdma_restrack_entry res; ++}; ++ ++struct ib_wq_init_attr { ++ void *wq_context; ++ enum ib_wq_type wq_type; ++ u32 max_wr; ++ u32 max_sge; ++ struct ib_cq *cq; ++ void (*event_handler)(struct ib_event *, void *); ++ u32 create_flags; ++}; ++ ++struct ib_wq_attr { ++ enum ib_wq_state wq_state; ++ enum ib_wq_state curr_wq_state; ++ u32 flags; ++ u32 flags_mask; ++}; ++ ++struct ib_rwq_ind_table_init_attr { ++ u32 log_ind_tbl_size; ++ struct ib_wq **ind_tbl; ++}; ++ ++enum port_pkey_state { ++ IB_PORT_PKEY_NOT_VALID = 0, ++ IB_PORT_PKEY_VALID = 1, ++ IB_PORT_PKEY_LISTED = 2, ++}; ++ ++struct ib_port_pkey { ++ enum port_pkey_state state; ++ u16 pkey_index; ++ u8 port_num; ++ struct list_head qp_list; ++ struct list_head to_error_list; ++ struct ib_qp_security *sec; ++}; ++ ++struct ib_ports_pkeys; ++ ++struct ib_qp_security { ++ struct ib_qp *qp; ++ struct ib_device *dev; ++ struct mutex mutex; ++ struct ib_ports_pkeys *ports_pkeys; ++ struct list_head shared_qp_list; ++ void *security; ++ bool destroying; ++ atomic_t error_list_count; ++ struct completion error_complete; ++ int error_comps_pending; ++}; ++ ++struct ib_ports_pkeys { ++ struct ib_port_pkey main; ++ struct ib_port_pkey alt; ++}; ++ ++struct ib_dm { ++ struct ib_device *device; ++ u32 length; ++ u32 flags; ++ struct ib_uobject *uobject; ++ atomic_t usecnt; ++}; ++ ++struct ib_mw { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ struct ib_uobject 
*uobject; ++ u32 rkey; ++ enum ib_mw_type type; ++}; ++ ++struct ib_fmr { ++ struct ib_device *device; ++ struct ib_pd *pd; ++ struct list_head list; ++ u32 lkey; ++ u32 rkey; ++}; ++ ++enum ib_flow_attr_type { ++ IB_FLOW_ATTR_NORMAL = 0, ++ IB_FLOW_ATTR_ALL_DEFAULT = 1, ++ IB_FLOW_ATTR_MC_DEFAULT = 2, ++ IB_FLOW_ATTR_SNIFFER = 3, ++}; ++ ++enum ib_flow_spec_type { ++ IB_FLOW_SPEC_ETH = 32, ++ IB_FLOW_SPEC_IB = 34, ++ IB_FLOW_SPEC_IPV4 = 48, ++ IB_FLOW_SPEC_IPV6 = 49, ++ IB_FLOW_SPEC_ESP = 52, ++ IB_FLOW_SPEC_TCP = 64, ++ IB_FLOW_SPEC_UDP = 65, ++ IB_FLOW_SPEC_VXLAN_TUNNEL = 80, ++ IB_FLOW_SPEC_GRE = 81, ++ IB_FLOW_SPEC_MPLS = 96, ++ IB_FLOW_SPEC_INNER = 256, ++ IB_FLOW_SPEC_ACTION_TAG = 4096, ++ IB_FLOW_SPEC_ACTION_DROP = 4097, ++ IB_FLOW_SPEC_ACTION_HANDLE = 4098, ++ IB_FLOW_SPEC_ACTION_COUNT = 4099, ++}; ++ ++struct ib_flow_eth_filter { ++ u8 dst_mac[6]; ++ u8 src_mac[6]; ++ __be16 ether_type; ++ __be16 vlan_tag; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_eth { ++ u32 type; ++ u16 size; ++ struct ib_flow_eth_filter val; ++ struct ib_flow_eth_filter mask; ++}; ++ ++struct ib_flow_ib_filter { ++ __be16 dlid; ++ __u8 sl; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_ib { ++ u32 type; ++ u16 size; ++ struct ib_flow_ib_filter val; ++ struct ib_flow_ib_filter mask; ++}; ++ ++struct ib_flow_ipv4_filter { ++ __be32 src_ip; ++ __be32 dst_ip; ++ u8 proto; ++ u8 tos; ++ u8 ttl; ++ u8 flags; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_ipv4 { ++ u32 type; ++ u16 size; ++ struct ib_flow_ipv4_filter val; ++ struct ib_flow_ipv4_filter mask; ++}; ++ ++struct ib_flow_ipv6_filter { ++ u8 src_ip[16]; ++ u8 dst_ip[16]; ++ __be32 flow_label; ++ u8 next_hdr; ++ u8 traffic_class; ++ u8 hop_limit; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_ipv6 { ++ u32 type; ++ u16 size; ++ struct ib_flow_ipv6_filter val; ++ struct ib_flow_ipv6_filter mask; ++}; ++ ++struct ib_flow_tcp_udp_filter { ++ __be16 dst_port; ++ __be16 src_port; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_tcp_udp { ++ u32 type; ++ u16 size; ++ struct ib_flow_tcp_udp_filter val; ++ struct ib_flow_tcp_udp_filter mask; ++}; ++ ++struct ib_flow_tunnel_filter { ++ __be32 tunnel_id; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_tunnel { ++ u32 type; ++ u16 size; ++ struct ib_flow_tunnel_filter val; ++ struct ib_flow_tunnel_filter mask; ++}; ++ ++struct ib_flow_esp_filter { ++ __be32 spi; ++ __be32 seq; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_esp { ++ u32 type; ++ u16 size; ++ struct ib_flow_esp_filter val; ++ struct ib_flow_esp_filter mask; ++}; ++ ++struct ib_flow_gre_filter { ++ __be16 c_ks_res0_ver; ++ __be16 protocol; ++ __be32 key; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_gre { ++ u32 type; ++ u16 size; ++ struct ib_flow_gre_filter val; ++ struct ib_flow_gre_filter mask; ++}; ++ ++struct ib_flow_mpls_filter { ++ __be32 tag; ++ u8 real_sz[0]; ++}; ++ ++struct ib_flow_spec_mpls { ++ u32 type; ++ u16 size; ++ struct ib_flow_mpls_filter val; ++ struct ib_flow_mpls_filter mask; ++}; ++ ++struct ib_flow_spec_action_tag { ++ enum ib_flow_spec_type type; ++ u16 size; ++ u32 tag_id; ++}; ++ ++struct ib_flow_spec_action_drop { ++ enum ib_flow_spec_type type; ++ u16 size; ++}; ++ ++struct ib_flow_spec_action_handle { ++ enum ib_flow_spec_type type; ++ u16 size; ++ struct ib_flow_action *act; ++}; ++ ++enum ib_flow_action_type { ++ IB_FLOW_ACTION_UNSPECIFIED = 0, ++ IB_FLOW_ACTION_ESP = 1, ++}; ++ ++struct ib_flow_action { ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ enum ib_flow_action_type type; ++ atomic_t 
usecnt; ++}; ++ ++struct ib_flow_spec_action_count { ++ enum ib_flow_spec_type type; ++ u16 size; ++ struct ib_counters *counters; ++}; ++ ++struct ib_counters { ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++ atomic_t usecnt; ++}; ++ ++union ib_flow_spec { ++ struct { ++ u32 type; ++ u16 size; ++ }; ++ struct ib_flow_spec_eth eth; ++ struct ib_flow_spec_ib ib; ++ struct ib_flow_spec_ipv4 ipv4; ++ struct ib_flow_spec_tcp_udp tcp_udp; ++ struct ib_flow_spec_ipv6 ipv6; ++ struct ib_flow_spec_tunnel tunnel; ++ struct ib_flow_spec_esp esp; ++ struct ib_flow_spec_gre gre; ++ struct ib_flow_spec_mpls mpls; ++ struct ib_flow_spec_action_tag flow_tag; ++ struct ib_flow_spec_action_drop drop; ++ struct ib_flow_spec_action_handle action; ++ struct ib_flow_spec_action_count flow_count; ++}; ++ ++struct ib_flow_attr { ++ enum ib_flow_attr_type type; ++ u16 size; ++ u16 priority; ++ u32 flags; ++ u8 num_of_specs; ++ u8 port; ++ union ib_flow_spec flows[0]; ++}; ++ ++struct ib_flow { ++ struct ib_qp *qp; ++ struct ib_device *device; ++ struct ib_uobject *uobject; ++}; ++ ++struct ib_flow_action_attrs_esp_keymats { ++ enum ib_uverbs_flow_action_esp_keymat protocol; ++ union { ++ struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; ++ } keymat; ++}; ++ ++struct ib_flow_action_attrs_esp_replays { ++ enum ib_uverbs_flow_action_esp_replay protocol; ++ union { ++ struct ib_uverbs_flow_action_esp_replay_bmp bmp; ++ } replay; ++}; ++ ++struct ib_flow_spec_list { ++ struct ib_flow_spec_list *next; ++ union ib_flow_spec spec; ++}; ++ ++struct ib_flow_action_attrs_esp { ++ struct ib_flow_action_attrs_esp_keymats *keymat; ++ struct ib_flow_action_attrs_esp_replays *replay; ++ struct ib_flow_spec_list *encap; ++ u32 esn; ++ u32 spi; ++ u32 seq; ++ u32 tfc_pad; ++ u64 flags; ++ u64 hard_limit_pkts; ++}; ++ ++struct ib_pkey_cache; ++ ++struct ib_gid_table; ++ ++struct ib_port_cache { ++ u64 subnet_prefix; ++ struct ib_pkey_cache *pkey; ++ struct ib_gid_table *gid; ++ u8 lmc; ++ enum ib_port_state port_state; ++}; ++ ++struct ib_port_immutable { ++ int pkey_tbl_len; ++ int gid_tbl_len; ++ u32 core_cap_flags; ++ u32 max_mad_size; ++}; ++ ++struct ib_port_pkey_list { ++ spinlock_t list_lock; ++ struct list_head pkey_list; ++}; ++ ++struct ib_counters_read_attr { ++ u64 *counters_buff; ++ u32 ncounters; ++ u32 flags; ++}; ++ ++enum blk_zone_type { ++ BLK_ZONE_TYPE_CONVENTIONAL = 1, ++ BLK_ZONE_TYPE_SEQWRITE_REQ = 2, ++ BLK_ZONE_TYPE_SEQWRITE_PREF = 3, ++}; ++ ++struct blk_zone { ++ __u64 start; ++ __u64 len; ++ __u64 wp; ++ __u8 type; ++ __u8 cond; ++ __u8 non_seq; ++ __u8 reset; ++ __u8 reserved[36]; ++}; ++ ++struct blk_zone_report { ++ __u64 sector; ++ __u32 nr_zones; ++ __u8 reserved[4]; ++ struct blk_zone zones[0]; ++}; ++ ++struct blk_zone_range { ++ __u64 sector; ++ __u64 nr_sectors; ++}; ++ ++struct blk_zone_report_hdr { ++ unsigned int nr_zones; ++ u8 padding[60]; ++}; ++ ++enum wbt_flags { ++ WBT_TRACKED = 1, ++ WBT_READ = 2, ++ WBT_KSWAPD = 4, ++ WBT_DISCARD = 8, ++ WBT_NR_BITS = 4, ++}; ++ ++enum { ++ WBT_STATE_ON_DEFAULT = 1, ++ WBT_STATE_ON_MANUAL = 2, ++}; ++ ++struct rq_wb { ++ unsigned int wb_background; ++ unsigned int wb_normal; ++ short int enable_state; ++ unsigned int unknown_cnt; ++ u64 win_nsec; ++ u64 cur_win_nsec; ++ struct blk_stat_callback *cb; ++ u64 sync_issue; ++ void *sync_cookie; ++ unsigned int wc; ++ long unsigned int last_issue; ++ long unsigned int last_comp; ++ long unsigned int min_lat_nsec; ++ struct rq_qos rqos; ++ struct rq_wait rq_wait[3]; ++ struct rq_depth 
rq_depth; ++}; ++ ++struct trace_event_raw_wbt_stat { ++ struct trace_entry ent; ++ char name[32]; ++ s64 rmean; ++ u64 rmin; ++ u64 rmax; ++ s64 rnr_samples; ++ s64 rtime; ++ s64 wmean; ++ u64 wmin; ++ u64 wmax; ++ s64 wnr_samples; ++ s64 wtime; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wbt_lat { ++ struct trace_entry ent; ++ char name[32]; ++ long unsigned int lat; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wbt_step { ++ struct trace_entry ent; ++ char name[32]; ++ const char *msg; ++ int step; ++ long unsigned int window; ++ unsigned int bg; ++ unsigned int normal; ++ unsigned int max; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_wbt_timer { ++ struct trace_entry ent; ++ char name[32]; ++ unsigned int status; ++ int step; ++ unsigned int inflight; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_wbt_stat {}; ++ ++struct trace_event_data_offsets_wbt_lat {}; ++ ++struct trace_event_data_offsets_wbt_step {}; ++ ++struct trace_event_data_offsets_wbt_timer {}; ++ ++enum { ++ RWB_DEF_DEPTH = 16, ++ RWB_WINDOW_NSEC = 100000000, ++ RWB_MIN_WRITE_SAMPLES = 3, ++ RWB_UNKNOWN_BUMP = 5, ++}; ++ ++enum { ++ LAT_OK = 1, ++ LAT_UNKNOWN = 2, ++ LAT_UNKNOWN_WRITES = 3, ++ LAT_EXCEEDED = 4, ++}; ++ ++struct wbt_wait_data { ++ struct wait_queue_entry wq; ++ struct task_struct *task; ++ struct rq_wb *rwb; ++ struct rq_wait *rqw; ++ long unsigned int rw; ++ bool got_token; ++}; ++ ++struct show_busy_params { ++ struct seq_file *m; ++ struct blk_mq_hw_ctx *hctx; ++}; ++ ++typedef __kernel_long_t __kernel_ptrdiff_t; ++ ++typedef __kernel_ptrdiff_t ptrdiff_t; ++ ++enum { ++ REG_OP_ISFREE = 0, ++ REG_OP_ALLOC = 1, ++ REG_OP_RELEASE = 2, ++}; ++ ++typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); ++ ++typedef void sg_free_fn(struct scatterlist *, unsigned int); ++ ++struct sg_page_iter { ++ struct scatterlist *sg; ++ unsigned int sg_pgoffset; ++ unsigned int __nents; ++ int __pg_advance; ++}; ++ ++struct sg_mapping_iter { ++ struct page *page; ++ void *addr; ++ size_t length; ++ size_t consumed; ++ struct sg_page_iter piter; ++ unsigned int __offset; ++ unsigned int __remaining; ++ unsigned int __flags; ++}; ++ ++struct flex_array_part { ++ char elements[65536]; ++}; ++ ++struct rhashtable_walker { ++ struct list_head list; ++ struct bucket_table *tbl; ++}; ++ ++struct rhashtable_iter { ++ struct rhashtable *ht; ++ struct rhash_head *p; ++ struct rhlist_head *list; ++ struct rhashtable_walker walker; ++ unsigned int slot; ++ unsigned int skip; ++ bool end_of_table; ++}; ++ ++union nested_table { ++ union nested_table *table; ++ struct rhash_head *bucket; ++}; ++ ++struct reciprocal_value_adv { ++ u32 m; ++ u8 sh; ++ u8 exp; ++ bool is_wide_m; ++}; ++ ++struct once_work { ++ struct work_struct work; ++ struct static_key_true *key; ++}; ++ ++struct test_fail { ++ const char *str; ++ unsigned int base; ++}; ++ ++struct test_s8 { ++ const char *str; ++ unsigned int base; ++ s8 expected_res; ++}; ++ ++struct test_u8 { ++ const char *str; ++ unsigned int base; ++ u8 expected_res; ++}; ++ ++struct test_s16 { ++ const char *str; ++ unsigned int base; ++ s16 expected_res; ++}; ++ ++struct test_u16 { ++ const char *str; ++ unsigned int base; ++ u16 expected_res; ++}; ++ ++struct test_s32 { ++ const char *str; ++ unsigned int base; ++ s32 expected_res; ++}; ++ ++struct test_u32 { ++ const char *str; ++ unsigned int base; ++ u32 expected_res; ++}; ++ ++struct test_s64 { ++ const char *str; ++ unsigned int base; ++ s64 expected_res; ++}; ++ ++struct test_u64 { ++ const char 
*str; ++ unsigned int base; ++ u64 expected_res; ++}; ++ ++struct test_ll { ++ const char *str; ++ unsigned int base; ++ long long int expected_res; ++}; ++ ++struct test_ull { ++ const char *str; ++ unsigned int base; ++ long long unsigned int expected_res; ++}; ++ ++enum devm_ioremap_type { ++ DEVM_IOREMAP = 0, ++ DEVM_IOREMAP_NC = 1, ++ DEVM_IOREMAP_WC = 2, ++}; ++ ++struct pcim_iomap_devres { ++ void *table[6]; ++}; ++ ++enum { ++ LOGIC_PIO_INDIRECT = 0, ++ LOGIC_PIO_CPU_MMIO = 1, ++}; ++ ++struct logic_pio_host_ops; ++ ++struct logic_pio_hwaddr { ++ struct list_head list; ++ struct fwnode_handle *fwnode; ++ resource_size_t hw_start; ++ resource_size_t io_start; ++ resource_size_t size; ++ long unsigned int flags; ++ void *hostdata; ++ const struct logic_pio_host_ops *ops; ++}; ++ ++struct logic_pio_host_ops { ++ u32 (*in)(void *, long unsigned int, size_t); ++ void (*out)(void *, long unsigned int, u32, size_t); ++ u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); ++ void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); ++}; ++ ++struct btree_head { ++ long unsigned int *node; ++ mempool_t *mempool; ++ int height; ++}; ++ ++struct btree_geo { ++ int keylen; ++ int no_pairs; ++ int no_longs; ++}; ++ ++typedef void (*visitor128_t)(void *, long unsigned int, u64, u64, size_t); ++ ++typedef void (*visitorl_t)(void *, long unsigned int, long unsigned int, size_t); ++ ++typedef void (*visitor32_t)(void *, long unsigned int, u32, size_t); ++ ++typedef void (*visitor64_t)(void *, long unsigned int, u64, size_t); ++ ++struct interval_tree_node { ++ struct rb_node rb; ++ long unsigned int start; ++ long unsigned int last; ++ long unsigned int __subtree_last; ++}; ++ ++enum assoc_array_walk_status { ++ assoc_array_walk_tree_empty = 0, ++ assoc_array_walk_found_terminal_node = 1, ++ assoc_array_walk_found_wrong_shortcut = 2, ++}; ++ ++struct assoc_array_walk_result { ++ struct { ++ struct assoc_array_node *node; ++ int level; ++ int slot; ++ } terminal_node; ++ struct { ++ struct assoc_array_shortcut *shortcut; ++ int level; ++ int sc_level; ++ long unsigned int sc_segments; ++ long unsigned int dissimilarity; ++ } wrong_shortcut; ++}; ++ ++struct assoc_array_delete_collapse_context { ++ struct assoc_array_node *node; ++ const void *skip_leaf; ++ int slot; ++}; ++ ++struct xxh32_state { ++ uint32_t total_len_32; ++ uint32_t large_len; ++ uint32_t v1; ++ uint32_t v2; ++ uint32_t v3; ++ uint32_t v4; ++ uint32_t mem32[4]; ++ uint32_t memsize; ++}; ++ ++struct xxh64_state { ++ uint64_t total_len; ++ uint64_t v1; ++ uint64_t v2; ++ uint64_t v3; ++ uint64_t v4; ++ uint64_t mem64[4]; ++ uint32_t memsize; ++}; ++ ++struct gen_pool_chunk { ++ struct list_head next_chunk; ++ atomic_long_t avail; ++ phys_addr_t phys_addr; ++ long unsigned int start_addr; ++ long unsigned int end_addr; ++ long unsigned int bits[0]; ++}; ++ ++struct genpool_data_align { ++ int align; ++}; ++ ++struct genpool_data_fixed { ++ long unsigned int offset; ++}; ++ ++typedef struct z_stream_s z_stream; ++ ++typedef z_stream *z_streamp; ++ ++typedef struct { ++ unsigned char op; ++ unsigned char bits; ++ short unsigned int val; ++} code; ++ ++typedef enum { ++ HEAD = 0, ++ FLAGS = 1, ++ TIME = 2, ++ OS = 3, ++ EXLEN = 4, ++ EXTRA = 5, ++ NAME = 6, ++ COMMENT = 7, ++ HCRC = 8, ++ DICTID = 9, ++ DICT = 10, ++ TYPE = 11, ++ TYPEDO = 12, ++ STORED = 13, ++ COPY = 14, ++ TABLE = 15, ++ LENLENS = 16, ++ CODELENS = 17, ++ LEN = 18, ++ LENEXT = 19, ++ DIST = 20, ++ DISTEXT = 21, ++ MATCH = 22, ++ LIT 
= 23, ++ CHECK = 24, ++ LENGTH = 25, ++ DONE = 26, ++ BAD___2 = 27, ++ MEM = 28, ++ SYNC = 29, ++} inflate_mode; ++ ++struct inflate_state { ++ inflate_mode mode; ++ int last; ++ int wrap; ++ int havedict; ++ int flags; ++ unsigned int dmax; ++ long unsigned int check; ++ long unsigned int total; ++ unsigned int wbits; ++ unsigned int wsize; ++ unsigned int whave; ++ unsigned int write; ++ unsigned char *window; ++ long unsigned int hold; ++ unsigned int bits; ++ unsigned int length; ++ unsigned int offset; ++ unsigned int extra; ++ const code *lencode; ++ const code *distcode; ++ unsigned int lenbits; ++ unsigned int distbits; ++ unsigned int ncode; ++ unsigned int nlen; ++ unsigned int ndist; ++ unsigned int have; ++ code *next; ++ short unsigned int lens[320]; ++ short unsigned int work[288]; ++ code codes[2048]; ++}; ++ ++union uu { ++ short unsigned int us; ++ unsigned char b[2]; ++}; ++ ++typedef unsigned int uInt; ++ ++struct inflate_workspace { ++ struct inflate_state inflate_state; ++ unsigned char working_window[32768]; ++}; ++ ++typedef enum { ++ CODES = 0, ++ LENS = 1, ++ DISTS = 2, ++} codetype; ++ ++typedef unsigned char uch; ++ ++typedef short unsigned int ush; ++ ++typedef long unsigned int ulg; ++ ++struct ct_data_s { ++ union { ++ ush freq; ++ ush code; ++ } fc; ++ union { ++ ush dad; ++ ush len; ++ } dl; ++}; ++ ++typedef struct ct_data_s ct_data; ++ ++struct static_tree_desc_s { ++ const ct_data *static_tree; ++ const int *extra_bits; ++ int extra_base; ++ int elems; ++ int max_length; ++}; ++ ++typedef struct static_tree_desc_s static_tree_desc; ++ ++struct tree_desc_s { ++ ct_data *dyn_tree; ++ int max_code; ++ static_tree_desc *stat_desc; ++}; ++ ++typedef ush Pos; ++ ++typedef unsigned int IPos; ++ ++struct deflate_state { ++ z_streamp strm; ++ int status; ++ Byte *pending_buf; ++ ulg pending_buf_size; ++ Byte *pending_out; ++ int pending; ++ int noheader; ++ Byte data_type; ++ Byte method; ++ int last_flush; ++ uInt w_size; ++ uInt w_bits; ++ uInt w_mask; ++ Byte *window; ++ ulg window_size; ++ Pos *prev; ++ Pos *head; ++ uInt ins_h; ++ uInt hash_size; ++ uInt hash_bits; ++ uInt hash_mask; ++ uInt hash_shift; ++ long int block_start; ++ uInt match_length; ++ IPos prev_match; ++ int match_available; ++ uInt strstart; ++ uInt match_start; ++ uInt lookahead; ++ uInt prev_length; ++ uInt max_chain_length; ++ uInt max_lazy_match; ++ int level; ++ int strategy; ++ uInt good_match; ++ int nice_match; ++ struct ct_data_s dyn_ltree[573]; ++ struct ct_data_s dyn_dtree[61]; ++ struct ct_data_s bl_tree[39]; ++ struct tree_desc_s l_desc; ++ struct tree_desc_s d_desc; ++ struct tree_desc_s bl_desc; ++ ush bl_count[16]; ++ int heap[573]; ++ int heap_len; ++ int heap_max; ++ uch depth[573]; ++ uch *l_buf; ++ uInt lit_bufsize; ++ uInt last_lit; ++ ush *d_buf; ++ ulg opt_len; ++ ulg static_len; ++ ulg compressed_len; ++ uInt matches; ++ int last_eob_len; ++ ush bi_buf; ++ int bi_valid; ++}; ++ ++typedef struct deflate_state deflate_state; ++ ++struct deflate_workspace { ++ deflate_state deflate_memory; ++ Byte *window_memory; ++ Pos *prev_memory; ++ Pos *head_memory; ++ char *overlay_memory; ++}; ++ ++typedef struct deflate_workspace deflate_workspace; ++ ++typedef enum { ++ need_more = 0, ++ block_done = 1, ++ finish_started = 2, ++ finish_done = 3, ++} block_state; ++ ++typedef block_state (*compress_func)(deflate_state *, int); ++ ++struct config_s { ++ ush good_length; ++ ush max_lazy; ++ ush nice_length; ++ ush max_chain; ++ compress_func func; ++}; ++ ++typedef struct config_s 
config; ++ ++typedef struct tree_desc_s tree_desc; ++ ++typedef struct { ++ const uint8_t *externalDict; ++ size_t extDictSize; ++ const uint8_t *prefixEnd; ++ size_t prefixSize; ++} LZ4_streamDecode_t_internal; ++ ++typedef union { ++ long long unsigned int table[4]; ++ LZ4_streamDecode_t_internal internal_donotuse; ++} LZ4_streamDecode_t; ++ ++typedef uint8_t BYTE; ++ ++typedef uint16_t U16; ++ ++typedef uint32_t U32; ++ ++typedef uint64_t U64; ++ ++enum { ++ noDict = 0, ++ withPrefix64k = 1, ++ usingExtDict = 2, ++}; ++ ++enum { ++ endOnOutputSize = 0, ++ endOnInputSize = 1, ++}; ++ ++enum { ++ full = 0, ++ partial = 1, ++}; ++ ++enum xz_mode { ++ XZ_SINGLE = 0, ++ XZ_PREALLOC = 1, ++ XZ_DYNALLOC = 2, ++}; ++ ++enum xz_ret { ++ XZ_OK = 0, ++ XZ_STREAM_END = 1, ++ XZ_UNSUPPORTED_CHECK = 2, ++ XZ_MEM_ERROR = 3, ++ XZ_MEMLIMIT_ERROR = 4, ++ XZ_FORMAT_ERROR = 5, ++ XZ_OPTIONS_ERROR = 6, ++ XZ_DATA_ERROR = 7, ++ XZ_BUF_ERROR = 8, ++}; ++ ++struct xz_buf { ++ const uint8_t *in; ++ size_t in_pos; ++ size_t in_size; ++ uint8_t *out; ++ size_t out_pos; ++ size_t out_size; ++}; ++ ++typedef uint64_t vli_type; ++ ++enum xz_check { ++ XZ_CHECK_NONE = 0, ++ XZ_CHECK_CRC32 = 1, ++ XZ_CHECK_CRC64 = 4, ++ XZ_CHECK_SHA256 = 10, ++}; ++ ++struct xz_dec_hash { ++ vli_type unpadded; ++ vli_type uncompressed; ++ uint32_t crc32; ++}; ++ ++struct xz_dec_lzma2; ++ ++struct xz_dec_bcj; ++ ++struct xz_dec { ++ enum { ++ SEQ_STREAM_HEADER = 0, ++ SEQ_BLOCK_START = 1, ++ SEQ_BLOCK_HEADER = 2, ++ SEQ_BLOCK_UNCOMPRESS = 3, ++ SEQ_BLOCK_PADDING = 4, ++ SEQ_BLOCK_CHECK = 5, ++ SEQ_INDEX = 6, ++ SEQ_INDEX_PADDING = 7, ++ SEQ_INDEX_CRC32 = 8, ++ SEQ_STREAM_FOOTER = 9, ++ } sequence; ++ uint32_t pos; ++ vli_type vli; ++ size_t in_start; ++ size_t out_start; ++ uint32_t crc32; ++ enum xz_check check_type; ++ enum xz_mode mode; ++ bool allow_buf_error; ++ struct { ++ vli_type compressed; ++ vli_type uncompressed; ++ uint32_t size; ++ } block_header; ++ struct { ++ vli_type compressed; ++ vli_type uncompressed; ++ vli_type count; ++ struct xz_dec_hash hash; ++ } block; ++ struct { ++ enum { ++ SEQ_INDEX_COUNT = 0, ++ SEQ_INDEX_UNPADDED = 1, ++ SEQ_INDEX_UNCOMPRESSED = 2, ++ } sequence; ++ vli_type size; ++ vli_type count; ++ struct xz_dec_hash hash; ++ } index; ++ struct { ++ size_t pos; ++ size_t size; ++ uint8_t buf[1024]; ++ } temp; ++ struct xz_dec_lzma2 *lzma2; ++ struct xz_dec_bcj *bcj; ++ bool bcj_active; ++}; ++ ++enum lzma_state { ++ STATE_LIT_LIT = 0, ++ STATE_MATCH_LIT_LIT = 1, ++ STATE_REP_LIT_LIT = 2, ++ STATE_SHORTREP_LIT_LIT = 3, ++ STATE_MATCH_LIT = 4, ++ STATE_REP_LIT = 5, ++ STATE_SHORTREP_LIT = 6, ++ STATE_LIT_MATCH = 7, ++ STATE_LIT_LONGREP = 8, ++ STATE_LIT_SHORTREP = 9, ++ STATE_NONLIT_MATCH = 10, ++ STATE_NONLIT_REP = 11, ++}; ++ ++struct dictionary { ++ uint8_t *buf; ++ size_t start; ++ size_t pos; ++ size_t full; ++ size_t limit; ++ size_t end; ++ uint32_t size; ++ uint32_t size_max; ++ uint32_t allocated; ++ enum xz_mode mode; ++}; ++ ++struct rc_dec { ++ uint32_t range; ++ uint32_t code; ++ uint32_t init_bytes_left; ++ const uint8_t *in; ++ size_t in_pos; ++ size_t in_limit; ++}; ++ ++struct lzma_len_dec { ++ uint16_t choice; ++ uint16_t choice2; ++ uint16_t low[128]; ++ uint16_t mid[128]; ++ uint16_t high[256]; ++}; ++ ++struct lzma_dec { ++ uint32_t rep0; ++ uint32_t rep1; ++ uint32_t rep2; ++ uint32_t rep3; ++ enum lzma_state state; ++ uint32_t len; ++ uint32_t lc; ++ uint32_t literal_pos_mask; ++ uint32_t pos_mask; ++ uint16_t is_match[192]; ++ uint16_t is_rep[12]; ++ uint16_t is_rep0[12]; ++ 
uint16_t is_rep1[12]; ++ uint16_t is_rep2[12]; ++ uint16_t is_rep0_long[192]; ++ uint16_t dist_slot[256]; ++ uint16_t dist_special[114]; ++ uint16_t dist_align[16]; ++ struct lzma_len_dec match_len_dec; ++ struct lzma_len_dec rep_len_dec; ++ uint16_t literal[12288]; ++}; ++ ++enum lzma2_seq { ++ SEQ_CONTROL = 0, ++ SEQ_UNCOMPRESSED_1 = 1, ++ SEQ_UNCOMPRESSED_2 = 2, ++ SEQ_COMPRESSED_0 = 3, ++ SEQ_COMPRESSED_1 = 4, ++ SEQ_PROPERTIES = 5, ++ SEQ_LZMA_PREPARE = 6, ++ SEQ_LZMA_RUN = 7, ++ SEQ_COPY = 8, ++}; ++ ++struct lzma2_dec { ++ enum lzma2_seq sequence; ++ enum lzma2_seq next_sequence; ++ uint32_t uncompressed; ++ uint32_t compressed; ++ bool need_dict_reset; ++ bool need_props; ++}; ++ ++struct xz_dec_lzma2___2 { ++ struct rc_dec rc; ++ struct dictionary dict; ++ struct lzma2_dec lzma2; ++ struct lzma_dec lzma; ++ struct { ++ uint32_t size; ++ uint8_t buf[63]; ++ } temp; ++}; ++ ++struct xz_dec_bcj___2 { ++ enum { ++ BCJ_X86 = 4, ++ BCJ_POWERPC = 5, ++ BCJ_IA64 = 6, ++ BCJ_ARM = 7, ++ BCJ_ARMTHUMB = 8, ++ BCJ_SPARC = 9, ++ } type; ++ enum xz_ret ret; ++ bool single_call; ++ uint32_t pos; ++ uint32_t x86_prev_mask; ++ uint8_t *out; ++ size_t out_pos; ++ size_t out_size; ++ struct { ++ size_t filtered; ++ size_t size; ++ uint8_t buf[16]; ++ } temp; ++}; ++ ++struct ts_state { ++ unsigned int offset; ++ char cb[40]; ++}; ++ ++struct ts_config; ++ ++struct ts_ops { ++ const char *name; ++ struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); ++ unsigned int (*find)(struct ts_config *, struct ts_state *); ++ void (*destroy)(struct ts_config *); ++ void * (*get_pattern)(struct ts_config *); ++ unsigned int (*get_pattern_len)(struct ts_config *); ++ struct module *owner; ++ struct list_head list; ++}; ++ ++struct ts_config { ++ struct ts_ops *ops; ++ int flags; ++ unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); ++ void (*finish)(struct ts_config *, struct ts_state *); ++}; ++ ++struct ts_linear_state { ++ unsigned int len; ++ const void *data; ++}; ++ ++struct ddebug_table { ++ struct list_head link; ++ const char *mod_name; ++ unsigned int num_ddebugs; ++ struct _ddebug *ddebugs; ++}; ++ ++struct ddebug_query { ++ const char *filename; ++ const char *module; ++ const char *function; ++ const char *format; ++ unsigned int first_lineno; ++ unsigned int last_lineno; ++}; ++ ++struct ddebug_iter { ++ struct ddebug_table *table; ++ unsigned int idx; ++}; ++ ++struct nla_bitfield32 { ++ __u32 value; ++ __u32 selector; ++}; ++ ++struct cpu_rmap { ++ struct kref refcount; ++ u16 size; ++ u16 used; ++ void **obj; ++ struct { ++ u16 index; ++ u16 dist; ++ } near[0]; ++}; ++ ++struct irq_glue { ++ struct irq_affinity_notify notify; ++ struct cpu_rmap *rmap; ++ u16 index; ++}; ++ ++typedef mpi_limb_t *mpi_ptr_t; ++ ++typedef int mpi_size_t; ++ ++typedef mpi_limb_t UWtype; ++ ++typedef unsigned int UHWtype; ++ ++struct karatsuba_ctx { ++ struct karatsuba_ctx *next; ++ mpi_ptr_t tspace; ++ mpi_size_t tspace_size; ++ mpi_ptr_t tp; ++ mpi_size_t tp_size; ++}; ++ ++typedef long int mpi_limb_signed_t; ++ ++struct sg_pool { ++ size_t size; ++ char *name; ++ struct kmem_cache *slab; ++ mempool_t *pool; ++}; ++ ++enum { ++ IRQ_POLL_F_SCHED = 0, ++ IRQ_POLL_F_DISABLE = 1, ++}; ++ ++struct font_desc { ++ int idx; ++ const char *name; ++ int width; ++ int height; ++ const void *data; ++ int pref; ++}; ++ ++struct font_data { ++ unsigned int extra[4]; ++ const unsigned char data[0]; ++}; ++ ++typedef u16 ucs2_char_t; ++ ++typedef int 
(*acpi_tbl_table_handler)(struct acpi_table_header *); ++ ++typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *, const long unsigned int); ++ ++struct acpi_probe_entry; ++ ++typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); ++ ++struct acpi_probe_entry { ++ __u8 id[5]; ++ __u8 type; ++ acpi_probe_entry_validate_subtbl subtable_valid; ++ union { ++ acpi_tbl_table_handler probe_table; ++ acpi_tbl_entry_handler probe_subtbl; ++ }; ++ kernel_ulong_t driver_data; ++}; ++ ++typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); ++ ++struct acpi_madt_generic_distributor { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 gic_id; ++ u64 base_address; ++ u32 global_irq_base; ++ u8 version; ++ u8 reserved2[3]; ++}; ++ ++enum acpi_madt_gic_version { ++ ACPI_MADT_GIC_VERSION_NONE = 0, ++ ACPI_MADT_GIC_VERSION_V1 = 1, ++ ACPI_MADT_GIC_VERSION_V2 = 2, ++ ACPI_MADT_GIC_VERSION_V3 = 3, ++ ACPI_MADT_GIC_VERSION_V4 = 4, ++ ACPI_MADT_GIC_VERSION_RESERVED = 5, ++}; ++ ++union gic_base { ++ void *common_base; ++ void **percpu_base; ++}; ++ ++struct gic_chip_data { ++ struct irq_chip chip; ++ union gic_base dist_base; ++ union gic_base cpu_base; ++ void *raw_dist_base; ++ void *raw_cpu_base; ++ u32 percpu_offset; ++ u32 saved_spi_enable[32]; ++ u32 saved_spi_active[32]; ++ u32 saved_spi_conf[64]; ++ u32 saved_spi_target[255]; ++ u32 *saved_ppi_enable; ++ u32 *saved_ppi_active; ++ u32 *saved_ppi_conf; ++ struct irq_domain *domain; ++ unsigned int gic_irqs; ++}; ++ ++struct gic_quirk { ++ const char *desc; ++ bool (*init)(void *); ++ u32 iidr; ++ u32 mask; ++}; ++ ++struct acpi_madt_generic_msi_frame { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 msi_frame_id; ++ u64 base_address; ++ u32 flags; ++ u16 spi_count; ++ u16 spi_base; ++}; ++ ++struct v2m_data { ++ struct list_head entry; ++ struct fwnode_handle *fwnode; ++ struct resource res; ++ void *base; ++ u32 spi_start; ++ u32 nr_spis; ++ u32 spi_offset; ++ long unsigned int *bm; ++ u32 flags; ++}; ++ ++struct acpi_madt_generic_redistributor { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u64 base_address; ++ u32 length; ++} __attribute__((packed)); ++ ++struct rdists { ++ struct { ++ void *rd_base; ++ struct page *pend_page; ++ phys_addr_t phys_base; ++ bool lpi_enabled; ++ } *rdist; ++ phys_addr_t prop_table_pa; ++ void *prop_table_va; ++ u64 flags; ++ u32 gicd_typer; ++ bool has_vlpis; ++ bool has_direct_lpi; ++}; ++ ++struct partition_affinity { ++ cpumask_t mask; ++ void *partition_id; ++}; ++ ++struct redist_region { ++ void *redist_base; ++ phys_addr_t phys_base; ++ bool single_redist; ++}; ++ ++struct partition_desc; ++ ++struct gic_chip_data___2 { ++ struct fwnode_handle *fwnode; ++ void *dist_base; ++ struct redist_region *redist_regions; ++ struct rdists rdists; ++ struct irq_domain *domain; ++ u64 redist_stride; ++ u32 nr_redist_regions; ++ bool has_rss; ++ unsigned int irq_nr; ++ struct partition_desc *ppi_descs[16]; ++}; ++ ++struct mbi_range { ++ u32 spi_start; ++ u32 nr_spis; ++ long unsigned int *bm; ++}; ++ ++struct acpi_madt_generic_translator { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 translation_id; ++ u64 base_address; ++ u32 reserved2; ++} __attribute__((packed)); ++ ++struct acpi_srat_gic_its_affinity { ++ struct acpi_subtable_header header; ++ u32 proximity_domain; ++ u16 reserved; ++ u32 its_id; ++} __attribute__((packed)); ++ ++enum its_vcpu_info_cmd_type { ++ MAP_VLPI = 0, ++ 
GET_VLPI = 1, ++ PROP_UPDATE_VLPI = 2, ++ PROP_UPDATE_AND_INV_VLPI = 3, ++ SCHEDULE_VPE = 4, ++ DESCHEDULE_VPE = 5, ++ INVALL_VPE = 6, ++}; ++ ++struct its_cmd_info { ++ enum its_vcpu_info_cmd_type cmd_type; ++ union { ++ struct its_vlpi_map *map; ++ u8 config; ++ }; ++}; ++ ++struct its_collection___2 { ++ u64 target_address; ++ u16 col_id; ++}; ++ ++struct its_baser { ++ void *base; ++ u64 val; ++ u32 order; ++ u32 psz; ++}; ++ ++struct its_cmd_block; ++ ++struct its_device___2; ++ ++struct its_node { ++ raw_spinlock_t lock; ++ struct mutex dev_alloc_lock; ++ struct list_head entry; ++ void *base; ++ phys_addr_t phys_base; ++ struct its_cmd_block *cmd_base; ++ struct its_cmd_block *cmd_write; ++ struct its_baser tables[8]; ++ struct its_collection___2 *collections; ++ struct fwnode_handle *fwnode_handle; ++ u64 (*get_msi_base)(struct its_device___2 *); ++ u64 cbaser_save; ++ u32 ctlr_save; ++ struct list_head its_device_list; ++ u64 flags; ++ long unsigned int list_nr; ++ u32 ite_size; ++ u32 device_ids; ++ int numa_node; ++ unsigned int msi_domain_flags; ++ u32 pre_its_base; ++ bool is_v4; ++ int vlpi_redist_offset; ++}; ++ ++struct its_cmd_block { ++ u64 raw_cmd[4]; ++}; ++ ++struct event_lpi_map { ++ long unsigned int *lpi_map; ++ u16 *col_map; ++ irq_hw_number_t lpi_base; ++ int nr_lpis; ++ raw_spinlock_t vlpi_lock; ++ struct its_vm *vm; ++ struct its_vlpi_map *vlpi_maps; ++ int nr_vlpis; ++}; ++ ++struct its_device___2 { ++ struct list_head entry; ++ struct its_node *its; ++ struct event_lpi_map event_map; ++ void *itt; ++ u32 nr_ites; ++ u32 device_id; ++ bool shared; ++}; ++ ++struct its_cmd_desc { ++ union { ++ struct { ++ struct its_device___2 *dev; ++ u32 event_id; ++ } its_inv_cmd; ++ struct { ++ struct its_device___2 *dev; ++ u32 event_id; ++ } its_clear_cmd; ++ struct { ++ struct its_device___2 *dev; ++ u32 event_id; ++ } its_int_cmd; ++ struct { ++ struct its_device___2 *dev; ++ int valid; ++ } its_mapd_cmd; ++ struct { ++ struct its_collection___2 *col; ++ int valid; ++ } its_mapc_cmd; ++ struct { ++ struct its_device___2 *dev; ++ u32 phys_id; ++ u32 event_id; ++ } its_mapti_cmd; ++ struct { ++ struct its_device___2 *dev; ++ struct its_collection___2 *col; ++ u32 event_id; ++ } its_movi_cmd; ++ struct { ++ struct its_device___2 *dev; ++ u32 event_id; ++ } its_discard_cmd; ++ struct { ++ struct its_collection___2 *col; ++ } its_invall_cmd; ++ struct { ++ struct its_vpe *vpe; ++ } its_vinvall_cmd; ++ struct { ++ struct its_vpe *vpe; ++ struct its_collection___2 *col; ++ bool valid; ++ } its_vmapp_cmd; ++ struct { ++ struct its_vpe *vpe; ++ struct its_device___2 *dev; ++ u32 virt_id; ++ u32 event_id; ++ bool db_enabled; ++ } its_vmapti_cmd; ++ struct { ++ struct its_vpe *vpe; ++ struct its_device___2 *dev; ++ u32 event_id; ++ bool db_enabled; ++ } its_vmovi_cmd; ++ struct { ++ struct its_vpe *vpe; ++ struct its_collection___2 *col; ++ u16 seq_num; ++ u16 its_list; ++ } its_vmovp_cmd; ++ }; ++}; ++ ++typedef struct its_collection___2 * (*its_cmd_builder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); ++ ++typedef struct its_vpe * (*its_cmd_vbuilder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); ++ ++struct lpi_range { ++ struct list_head entry; ++ u32 base_id; ++ u32 span; ++}; ++ ++struct its_srat_map { ++ u32 numa_node; ++ u32 its_id; ++}; ++ ++struct partition_desc___2 { ++ int nr_parts; ++ struct partition_affinity *parts; ++ struct irq_domain *domain; ++ struct irq_desc *chained_desc; ++ long unsigned int *bitmap; ++ struct 
irq_domain_ops ops; ++}; ++ ++struct mbigen_device { ++ struct platform_device *pdev; ++ void *base; ++}; ++ ++struct acpi_resource_irq { ++ u8 descriptor_length; ++ u8 triggering; ++ u8 polarity; ++ u8 sharable; ++ u8 wake_capable; ++ u8 interrupt_count; ++ u8 interrupts[1]; ++}; ++ ++struct acpi_resource_dma { ++ u8 type; ++ u8 bus_master; ++ u8 transfer; ++ u8 channel_count; ++ u8 channels[1]; ++}; ++ ++struct acpi_resource_start_dependent { ++ u8 descriptor_length; ++ u8 compatibility_priority; ++ u8 performance_robustness; ++}; ++ ++struct acpi_resource_io { ++ u8 io_decode; ++ u8 alignment; ++ u8 address_length; ++ u16 minimum; ++ u16 maximum; ++} __attribute__((packed)); ++ ++struct acpi_resource_fixed_io { ++ u16 address; ++ u8 address_length; ++} __attribute__((packed)); ++ ++struct acpi_resource_fixed_dma { ++ u16 request_lines; ++ u16 channels; ++ u8 width; ++} __attribute__((packed)); ++ ++struct acpi_resource_vendor { ++ u16 byte_length; ++ u8 byte_data[1]; ++} __attribute__((packed)); ++ ++struct acpi_resource_vendor_typed { ++ u16 byte_length; ++ u8 uuid_subtype; ++ u8 uuid[16]; ++ u8 byte_data[1]; ++}; ++ ++struct acpi_resource_end_tag { ++ u8 checksum; ++}; ++ ++struct acpi_resource_memory24 { ++ u8 write_protect; ++ u16 minimum; ++ u16 maximum; ++ u16 alignment; ++ u16 address_length; ++} __attribute__((packed)); ++ ++struct acpi_resource_memory32 { ++ u8 write_protect; ++ u32 minimum; ++ u32 maximum; ++ u32 alignment; ++ u32 address_length; ++} __attribute__((packed)); ++ ++struct acpi_resource_fixed_memory32 { ++ u8 write_protect; ++ u32 address; ++ u32 address_length; ++} __attribute__((packed)); ++ ++struct acpi_memory_attribute { ++ u8 write_protect; ++ u8 caching; ++ u8 range_type; ++ u8 translation; ++}; ++ ++struct acpi_io_attribute { ++ u8 range_type; ++ u8 translation; ++ u8 translation_type; ++ u8 reserved1; ++}; ++ ++union acpi_resource_attribute { ++ struct acpi_memory_attribute mem; ++ struct acpi_io_attribute io; ++ u8 type_specific; ++}; ++ ++struct acpi_resource_label { ++ u16 string_length; ++ char *string_ptr; ++} __attribute__((packed)); ++ ++struct acpi_resource_source { ++ u8 index; ++ u16 string_length; ++ char *string_ptr; ++} __attribute__((packed)); ++ ++struct acpi_address16_attribute { ++ u16 granularity; ++ u16 minimum; ++ u16 maximum; ++ u16 translation_offset; ++ u16 address_length; ++}; ++ ++struct acpi_address32_attribute { ++ u32 granularity; ++ u32 minimum; ++ u32 maximum; ++ u32 translation_offset; ++ u32 address_length; ++}; ++ ++struct acpi_address64_attribute { ++ u64 granularity; ++ u64 minimum; ++ u64 maximum; ++ u64 translation_offset; ++ u64 address_length; ++}; ++ ++struct acpi_resource_address { ++ u8 resource_type; ++ u8 producer_consumer; ++ u8 decode; ++ u8 min_address_fixed; ++ u8 max_address_fixed; ++ union acpi_resource_attribute info; ++}; ++ ++struct acpi_resource_address16 { ++ u8 resource_type; ++ u8 producer_consumer; ++ u8 decode; ++ u8 min_address_fixed; ++ u8 max_address_fixed; ++ union acpi_resource_attribute info; ++ struct acpi_address16_attribute address; ++ struct acpi_resource_source resource_source; ++} __attribute__((packed)); ++ ++struct acpi_resource_address32 { ++ u8 resource_type; ++ u8 producer_consumer; ++ u8 decode; ++ u8 min_address_fixed; ++ u8 max_address_fixed; ++ union acpi_resource_attribute info; ++ struct acpi_address32_attribute address; ++ struct acpi_resource_source resource_source; ++} __attribute__((packed)); ++ ++struct acpi_resource_address64 { ++ u8 resource_type; ++ u8 
producer_consumer; ++ u8 decode; ++ u8 min_address_fixed; ++ u8 max_address_fixed; ++ union acpi_resource_attribute info; ++ struct acpi_address64_attribute address; ++ struct acpi_resource_source resource_source; ++} __attribute__((packed)); ++ ++struct acpi_resource_extended_address64 { ++ u8 resource_type; ++ u8 producer_consumer; ++ u8 decode; ++ u8 min_address_fixed; ++ u8 max_address_fixed; ++ union acpi_resource_attribute info; ++ u8 revision_ID; ++ struct acpi_address64_attribute address; ++ u64 type_specific; ++} __attribute__((packed)); ++ ++struct acpi_resource_extended_irq { ++ u8 producer_consumer; ++ u8 triggering; ++ u8 polarity; ++ u8 sharable; ++ u8 wake_capable; ++ u8 interrupt_count; ++ struct acpi_resource_source resource_source; ++ u32 interrupts[1]; ++} __attribute__((packed)); ++ ++struct acpi_resource_generic_register { ++ u8 space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 access_size; ++ u64 address; ++} __attribute__((packed)); ++ ++struct acpi_resource_gpio { ++ u8 revision_id; ++ u8 connection_type; ++ u8 producer_consumer; ++ u8 pin_config; ++ u8 sharable; ++ u8 wake_capable; ++ u8 io_restriction; ++ u8 triggering; ++ u8 polarity; ++ u16 drive_strength; ++ u16 debounce_timeout; ++ u16 pin_table_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u16 *pin_table; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_common_serialbus { ++ u8 revision_id; ++ u8 type; ++ u8 producer_consumer; ++ u8 slave_mode; ++ u8 connection_sharing; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_i2c_serialbus { ++ u8 revision_id; ++ u8 type; ++ u8 producer_consumer; ++ u8 slave_mode; ++ u8 connection_sharing; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u8 *vendor_data; ++ u8 access_mode; ++ u16 slave_address; ++ u32 connection_speed; ++} __attribute__((packed)); ++ ++struct acpi_resource_spi_serialbus { ++ u8 revision_id; ++ u8 type; ++ u8 producer_consumer; ++ u8 slave_mode; ++ u8 connection_sharing; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u8 *vendor_data; ++ u8 wire_mode; ++ u8 device_polarity; ++ u8 data_bit_length; ++ u8 clock_phase; ++ u8 clock_polarity; ++ u16 device_selection; ++ u32 connection_speed; ++} __attribute__((packed)); ++ ++struct acpi_resource_uart_serialbus { ++ u8 revision_id; ++ u8 type; ++ u8 producer_consumer; ++ u8 slave_mode; ++ u8 connection_sharing; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u8 *vendor_data; ++ u8 endian; ++ u8 data_bits; ++ u8 stop_bits; ++ u8 flow_control; ++ u8 parity; ++ u8 lines_enabled; ++ u16 rx_fifo_size; ++ u16 tx_fifo_size; ++ u32 default_baud_rate; ++} __attribute__((packed)); ++ ++struct acpi_resource_pin_function { ++ u8 revision_id; ++ u8 pin_config; ++ u8 sharable; ++ u16 function_number; ++ u16 pin_table_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ u16 *pin_table; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_pin_config { ++ u8 revision_id; ++ u8 producer_consumer; ++ u8 sharable; ++ u8 pin_config_type; ++ u32 pin_config_value; ++ u16 pin_table_length; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ 
u16 *pin_table; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_pin_group { ++ u8 revision_id; ++ u8 producer_consumer; ++ u16 pin_table_length; ++ u16 vendor_length; ++ u16 *pin_table; ++ struct acpi_resource_label resource_label; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_pin_group_function { ++ u8 revision_id; ++ u8 producer_consumer; ++ u8 sharable; ++ u16 function_number; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ struct acpi_resource_label resource_source_label; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++struct acpi_resource_pin_group_config { ++ u8 revision_id; ++ u8 producer_consumer; ++ u8 sharable; ++ u8 pin_config_type; ++ u32 pin_config_value; ++ u16 vendor_length; ++ struct acpi_resource_source resource_source; ++ struct acpi_resource_label resource_source_label; ++ u8 *vendor_data; ++} __attribute__((packed)); ++ ++union acpi_resource_data { ++ struct acpi_resource_irq irq; ++ struct acpi_resource_dma dma; ++ struct acpi_resource_start_dependent start_dpf; ++ struct acpi_resource_io io; ++ struct acpi_resource_fixed_io fixed_io; ++ struct acpi_resource_fixed_dma fixed_dma; ++ struct acpi_resource_vendor vendor; ++ struct acpi_resource_vendor_typed vendor_typed; ++ struct acpi_resource_end_tag end_tag; ++ struct acpi_resource_memory24 memory24; ++ struct acpi_resource_memory32 memory32; ++ struct acpi_resource_fixed_memory32 fixed_memory32; ++ struct acpi_resource_address16 address16; ++ struct acpi_resource_address32 address32; ++ struct acpi_resource_address64 address64; ++ struct acpi_resource_extended_address64 ext_address64; ++ struct acpi_resource_extended_irq extended_irq; ++ struct acpi_resource_generic_register generic_reg; ++ struct acpi_resource_gpio gpio; ++ struct acpi_resource_i2c_serialbus i2c_serial_bus; ++ struct acpi_resource_spi_serialbus spi_serial_bus; ++ struct acpi_resource_uart_serialbus uart_serial_bus; ++ struct acpi_resource_common_serialbus common_serial_bus; ++ struct acpi_resource_pin_function pin_function; ++ struct acpi_resource_pin_config pin_config; ++ struct acpi_resource_pin_group pin_group; ++ struct acpi_resource_pin_group_function pin_group_function; ++ struct acpi_resource_pin_group_config pin_group_config; ++ struct acpi_resource_address address; ++}; ++ ++struct acpi_resource { ++ u32 type; ++ u32 length; ++ union acpi_resource_data data; ++} __attribute__((packed)); ++ ++struct combiner_reg { ++ void *addr; ++ long unsigned int enabled; ++}; ++ ++struct combiner { ++ struct irq_domain *domain; ++ int parent_irq; ++ u32 nirqs; ++ u32 nregs; ++ struct combiner_reg regs[0]; ++}; ++ ++struct get_registers_context { ++ struct device *dev; ++ struct combiner *combiner; ++ int err; ++}; ++ ++struct plat_serial8250_port { ++ long unsigned int iobase; ++ void *membase; ++ resource_size_t mapbase; ++ unsigned int irq; ++ long unsigned int irqflags; ++ unsigned int uartclk; ++ void *private_data; ++ unsigned char regshift; ++ unsigned char iotype; ++ unsigned char hub6; ++ upf_t flags; ++ unsigned int type; ++ unsigned int (*serial_in)(struct uart_port *, int); ++ void (*serial_out)(struct uart_port *, int, int); ++ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); ++ void (*set_ldisc)(struct uart_port *, struct ktermios *); ++ unsigned int (*get_mctrl)(struct uart_port *); ++ int (*handle_irq)(struct uart_port *); ++ void (*pm)(struct uart_port *, unsigned int, unsigned int); ++ void (*handle_break)(struct uart_port 
*); ++}; ++ ++struct lpc_cycle_para { ++ unsigned int opflags; ++ unsigned int csize; ++}; ++ ++struct hisi_lpc_dev { ++ spinlock_t cycle_lock; ++ void *membase; ++ struct logic_pio_hwaddr *io_host; ++}; ++ ++struct hisi_lpc_acpi_cell { ++ const char *hid; ++ const char *name; ++ void *pdata; ++ size_t pdata_size; ++}; ++ ++struct cs_data { ++ u32 enable_mask; ++ u16 slow_cfg; ++ u16 fast_cfg; ++}; ++ ++struct ebi2_xmem_prop { ++ const char *prop; ++ u32 max; ++ bool slowreg; ++ u16 shift; ++}; ++ ++struct regmap; ++ ++struct vexpress_config_bridge_ops { ++ struct regmap * (*regmap_init)(struct device *, void *); ++ void (*regmap_exit)(struct regmap *, void *); ++}; ++ ++struct vexpress_config_bridge { ++ struct vexpress_config_bridge_ops *ops; ++ void *context; ++}; ++ ++struct regulator; ++ ++enum phy_mode { ++ PHY_MODE_INVALID = 0, ++ PHY_MODE_USB_HOST = 1, ++ PHY_MODE_USB_HOST_LS = 2, ++ PHY_MODE_USB_HOST_FS = 3, ++ PHY_MODE_USB_HOST_HS = 4, ++ PHY_MODE_USB_HOST_SS = 5, ++ PHY_MODE_USB_DEVICE = 6, ++ PHY_MODE_USB_DEVICE_LS = 7, ++ PHY_MODE_USB_DEVICE_FS = 8, ++ PHY_MODE_USB_DEVICE_HS = 9, ++ PHY_MODE_USB_DEVICE_SS = 10, ++ PHY_MODE_USB_OTG = 11, ++ PHY_MODE_SGMII = 12, ++ PHY_MODE_2500SGMII = 13, ++ PHY_MODE_10GKR = 14, ++ PHY_MODE_UFS_HS_A = 15, ++ PHY_MODE_UFS_HS_B = 16, ++}; ++ ++struct phy; ++ ++struct phy_ops { ++ int (*init)(struct phy *); ++ int (*exit)(struct phy *); ++ int (*power_on)(struct phy *); ++ int (*power_off)(struct phy *); ++ int (*set_mode)(struct phy *, enum phy_mode); ++ int (*reset)(struct phy *); ++ int (*calibrate)(struct phy *); ++ struct module *owner; ++}; ++ ++struct phy_attrs { ++ u32 bus_width; ++ enum phy_mode mode; ++}; ++ ++struct phy { ++ struct device dev; ++ int id; ++ const struct phy_ops *ops; ++ struct mutex mutex; ++ int init_count; ++ int power_count; ++ struct phy_attrs attrs; ++ struct regulator *pwr; ++}; ++ ++struct phy_provider { ++ struct device *dev; ++ struct device_node *children; ++ struct module *owner; ++ struct list_head list; ++ struct phy * (*of_xlate)(struct device *, struct of_phandle_args *); ++}; ++ ++struct phy_lookup { ++ struct list_head node; ++ const char *dev_id; ++ const char *con_id; ++ struct phy *phy; ++}; ++ ++enum cmu_type_t { ++ REF_CMU = 0, ++ PHY_CMU = 1, ++}; ++ ++enum clk_type_t { ++ CLK_EXT_DIFF = 0, ++ CLK_INT_DIFF = 1, ++ CLK_INT_SING = 2, ++}; ++ ++enum xgene_phy_mode { ++ MODE_SATA = 0, ++ MODE_SGMII = 1, ++ MODE_PCIE = 2, ++ MODE_USB = 3, ++ MODE_XFI = 4, ++ MODE_MAX = 5, ++}; ++ ++struct xgene_sata_override_param { ++ u32 speed[2]; ++ u32 txspeed[3]; ++ u32 txboostgain[6]; ++ u32 txeyetuning[6]; ++ u32 txeyedirection[6]; ++ u32 txamplitude[6]; ++ u32 txprecursor_cn1[6]; ++ u32 txprecursor_cn2[6]; ++ u32 txpostcursor_cp1[6]; ++}; ++ ++struct xgene_phy_ctx { ++ struct device *dev; ++ struct phy *phy; ++ enum xgene_phy_mode mode; ++ enum clk_type_t clk_type; ++ void *sds_base; ++ struct clk *clk; ++ struct xgene_sata_override_param sata_param; ++}; ++ ++struct pinctrl; ++ ++struct pinctrl_state; ++ ++struct dev_pin_info { ++ struct pinctrl *p; ++ struct pinctrl_state *default_state; ++ struct pinctrl_state *init_state; ++ struct pinctrl_state *sleep_state; ++ struct pinctrl_state *idle_state; ++}; ++ ++struct pinctrl { ++ struct list_head node; ++ struct device *dev; ++ struct list_head states; ++ struct pinctrl_state *state; ++ struct list_head dt_maps; ++ struct kref users; ++}; ++ ++struct pinctrl_state { ++ struct list_head node; ++ const char *name; ++ struct list_head settings; ++}; ++ ++struct 
pinctrl_pin_desc { ++ unsigned int number; ++ const char *name; ++ void *drv_data; ++}; ++ ++struct gpio_chip; ++ ++struct pinctrl_gpio_range { ++ struct list_head node; ++ const char *name; ++ unsigned int id; ++ unsigned int base; ++ unsigned int pin_base; ++ const unsigned int *pins; ++ unsigned int npins; ++ struct gpio_chip *gc; ++}; ++ ++struct gpio_irq_chip { ++ struct irq_chip *chip; ++ struct irq_domain *domain; ++ const struct irq_domain_ops *domain_ops; ++ irq_flow_handler_t handler; ++ unsigned int default_type; ++ struct lock_class_key *lock_key; ++ struct lock_class_key *request_key; ++ irq_flow_handler_t parent_handler; ++ void *parent_handler_data; ++ unsigned int num_parents; ++ unsigned int parent_irq; ++ unsigned int *parents; ++ unsigned int *map; ++ bool threaded; ++ bool need_valid_mask; ++ long unsigned int *valid_mask; ++ unsigned int first; ++}; ++ ++struct gpio_device; ++ ++struct gpio_chip { ++ const char *label; ++ struct gpio_device *gpiodev; ++ struct device *parent; ++ struct module *owner; ++ int (*request)(struct gpio_chip *, unsigned int); ++ void (*free)(struct gpio_chip *, unsigned int); ++ int (*get_direction)(struct gpio_chip *, unsigned int); ++ int (*direction_input)(struct gpio_chip *, unsigned int); ++ int (*direction_output)(struct gpio_chip *, unsigned int, int); ++ int (*get)(struct gpio_chip *, unsigned int); ++ int (*get_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); ++ void (*set)(struct gpio_chip *, unsigned int, int); ++ void (*set_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); ++ int (*set_config)(struct gpio_chip *, unsigned int, long unsigned int); ++ int (*to_irq)(struct gpio_chip *, unsigned int); ++ void (*dbg_show)(struct seq_file *, struct gpio_chip *); ++ int (*init_valid_mask)(struct gpio_chip *); ++ int base; ++ u16 ngpio; ++ const char * const *names; ++ bool can_sleep; ++ long unsigned int (*read_reg)(void *); ++ void (*write_reg)(void *, long unsigned int); ++ bool be_bits; ++ void *reg_dat; ++ void *reg_set; ++ void *reg_clr; ++ void *reg_dir; ++ bool bgpio_dir_inverted; ++ int bgpio_bits; ++ spinlock_t bgpio_lock; ++ long unsigned int bgpio_data; ++ long unsigned int bgpio_dir; ++ struct gpio_irq_chip irq; ++ bool need_valid_mask; ++ long unsigned int *valid_mask; ++ struct device_node *of_node; ++ unsigned int of_gpio_n_cells; ++ int (*of_xlate)(struct gpio_chip *, const struct of_phandle_args *, u32 *); ++}; ++ ++struct pinctrl_dev; ++ ++struct pinctrl_map; ++ ++struct pinctrl_ops { ++ int (*get_groups_count)(struct pinctrl_dev *); ++ const char * (*get_group_name)(struct pinctrl_dev *, unsigned int); ++ int (*get_group_pins)(struct pinctrl_dev *, unsigned int, const unsigned int **, unsigned int *); ++ void (*pin_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); ++ int (*dt_node_to_map)(struct pinctrl_dev *, struct device_node *, struct pinctrl_map **, unsigned int *); ++ void (*dt_free_map)(struct pinctrl_dev *, struct pinctrl_map *, unsigned int); ++}; ++ ++struct pinctrl_desc; ++ ++struct pinctrl_dev { ++ struct list_head node; ++ struct pinctrl_desc *desc; ++ struct radix_tree_root pin_desc_tree; ++ struct list_head gpio_ranges; ++ struct device *dev; ++ struct module *owner; ++ void *driver_data; ++ struct pinctrl *p; ++ struct pinctrl_state *hog_default; ++ struct pinctrl_state *hog_sleep; ++ struct mutex mutex; ++ struct dentry *device_root; ++}; ++ ++enum pinctrl_map_type { ++ PIN_MAP_TYPE_INVALID = 0, ++ PIN_MAP_TYPE_DUMMY_STATE = 1, ++ 
PIN_MAP_TYPE_MUX_GROUP = 2, ++ PIN_MAP_TYPE_CONFIGS_PIN = 3, ++ PIN_MAP_TYPE_CONFIGS_GROUP = 4, ++}; ++ ++struct pinctrl_map_mux { ++ const char *group; ++ const char *function; ++}; ++ ++struct pinctrl_map_configs { ++ const char *group_or_pin; ++ long unsigned int *configs; ++ unsigned int num_configs; ++}; ++ ++struct pinctrl_map { ++ const char *dev_name; ++ const char *name; ++ enum pinctrl_map_type type; ++ const char *ctrl_dev_name; ++ union { ++ struct pinctrl_map_mux mux; ++ struct pinctrl_map_configs configs; ++ } data; ++}; ++ ++struct pinmux_ops; ++ ++struct pinconf_ops; ++ ++struct pinconf_generic_params; ++ ++struct pin_config_item; ++ ++struct pinctrl_desc { ++ const char *name; ++ const struct pinctrl_pin_desc *pins; ++ unsigned int npins; ++ const struct pinctrl_ops *pctlops; ++ const struct pinmux_ops *pmxops; ++ const struct pinconf_ops *confops; ++ struct module *owner; ++ unsigned int num_custom_params; ++ const struct pinconf_generic_params *custom_params; ++ const struct pin_config_item *custom_conf_items; ++}; ++ ++struct pinmux_ops { ++ int (*request)(struct pinctrl_dev *, unsigned int); ++ int (*free)(struct pinctrl_dev *, unsigned int); ++ int (*get_functions_count)(struct pinctrl_dev *); ++ const char * (*get_function_name)(struct pinctrl_dev *, unsigned int); ++ int (*get_function_groups)(struct pinctrl_dev *, unsigned int, const char * const **, unsigned int *); ++ int (*set_mux)(struct pinctrl_dev *, unsigned int, unsigned int); ++ int (*gpio_request_enable)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); ++ void (*gpio_disable_free)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); ++ int (*gpio_set_direction)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int, bool); ++ bool strict; ++}; ++ ++struct pinconf_ops { ++ bool is_generic; ++ int (*pin_config_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); ++ int (*pin_config_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); ++ int (*pin_config_group_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); ++ int (*pin_config_group_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); ++ int (*pin_config_dbg_parse_modify)(struct pinctrl_dev *, const char *, long unsigned int *); ++ void (*pin_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); ++ void (*pin_config_group_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); ++ void (*pin_config_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, long unsigned int); ++}; ++ ++enum pin_config_param { ++ PIN_CONFIG_BIAS_BUS_HOLD = 0, ++ PIN_CONFIG_BIAS_DISABLE = 1, ++ PIN_CONFIG_BIAS_HIGH_IMPEDANCE = 2, ++ PIN_CONFIG_BIAS_PULL_DOWN = 3, ++ PIN_CONFIG_BIAS_PULL_PIN_DEFAULT = 4, ++ PIN_CONFIG_BIAS_PULL_UP = 5, ++ PIN_CONFIG_DRIVE_OPEN_DRAIN = 6, ++ PIN_CONFIG_DRIVE_OPEN_SOURCE = 7, ++ PIN_CONFIG_DRIVE_PUSH_PULL = 8, ++ PIN_CONFIG_DRIVE_STRENGTH = 9, ++ PIN_CONFIG_INPUT_DEBOUNCE = 10, ++ PIN_CONFIG_INPUT_ENABLE = 11, ++ PIN_CONFIG_INPUT_SCHMITT = 12, ++ PIN_CONFIG_INPUT_SCHMITT_ENABLE = 13, ++ PIN_CONFIG_LOW_POWER_MODE = 14, ++ PIN_CONFIG_OUTPUT_ENABLE = 15, ++ PIN_CONFIG_OUTPUT = 16, ++ PIN_CONFIG_POWER_SOURCE = 17, ++ PIN_CONFIG_SLEEP_HARDWARE_STATE = 18, ++ PIN_CONFIG_SLEW_RATE = 19, ++ PIN_CONFIG_SKEW_DELAY = 20, ++ PIN_CONFIG_PERSIST_STATE = 21, ++ PIN_CONFIG_END = 127, ++ PIN_CONFIG_MAX = 255, ++}; ++ ++struct pinconf_generic_params { ++ const char * const property; ++ enum pin_config_param param; ++ u32 
default_value; ++}; ++ ++struct pin_config_item { ++ const enum pin_config_param param; ++ const char * const display; ++ const char * const format; ++ bool has_arg; ++}; ++ ++struct pinctrl_setting_mux { ++ unsigned int group; ++ unsigned int func; ++}; ++ ++struct pinctrl_setting_configs { ++ unsigned int group_or_pin; ++ long unsigned int *configs; ++ unsigned int num_configs; ++}; ++ ++struct pinctrl_setting { ++ struct list_head node; ++ enum pinctrl_map_type type; ++ struct pinctrl_dev *pctldev; ++ const char *dev_name; ++ union { ++ struct pinctrl_setting_mux mux; ++ struct pinctrl_setting_configs configs; ++ } data; ++}; ++ ++struct pin_desc { ++ struct pinctrl_dev *pctldev; ++ const char *name; ++ bool dynamic_name; ++ void *drv_data; ++ unsigned int mux_usecount; ++ const char *mux_owner; ++ const struct pinctrl_setting_mux *mux_setting; ++ const char *gpio_owner; ++}; ++ ++struct pinctrl_maps { ++ struct list_head node; ++ const struct pinctrl_map *maps; ++ unsigned int num_maps; ++}; ++ ++struct pctldev; ++ ++struct dbg_cfg { ++ enum pinctrl_map_type map_type; ++ char dev_name[16]; ++ char state_name[16]; ++ char pin_name[16]; ++}; ++ ++struct pinctrl_dt_map { ++ struct list_head node; ++ struct pinctrl_dev *pctldev; ++ struct pinctrl_map *map; ++ unsigned int num_maps; ++}; ++ ++struct msm_function { ++ const char *name; ++ const char * const *groups; ++ unsigned int ngroups; ++}; ++ ++struct msm_pingroup { ++ const char *name; ++ const unsigned int *pins; ++ unsigned int npins; ++ unsigned int *funcs; ++ unsigned int nfuncs; ++ u32 ctl_reg; ++ u32 io_reg; ++ u32 intr_cfg_reg; ++ u32 intr_status_reg; ++ u32 intr_target_reg; ++ unsigned int mux_bit: 5; ++ unsigned int pull_bit: 5; ++ unsigned int drv_bit: 5; ++ unsigned int oe_bit: 5; ++ unsigned int in_bit: 5; ++ unsigned int out_bit: 5; ++ char: 2; ++ unsigned int intr_enable_bit: 5; ++ unsigned int intr_status_bit: 5; ++ unsigned int intr_ack_high: 1; ++ unsigned int intr_target_bit: 5; ++ unsigned int intr_target_kpss_val: 5; ++ unsigned int intr_raw_status_bit: 5; ++ unsigned int intr_polarity_bit: 5; ++ char: 1; ++ unsigned int intr_detection_bit: 5; ++ unsigned int intr_detection_width: 5; ++}; ++ ++struct msm_pinctrl_soc_data { ++ const struct pinctrl_pin_desc *pins; ++ unsigned int npins; ++ const struct msm_function *functions; ++ unsigned int nfunctions; ++ const struct msm_pingroup *groups; ++ unsigned int ngroups; ++ unsigned int ngpios; ++ bool pull_no_keeper; ++}; ++ ++struct msm_pinctrl { ++ struct device *dev; ++ struct pinctrl_dev *pctrl; ++ struct gpio_chip chip; ++ struct pinctrl_desc desc; ++ struct notifier_block restart_nb; ++ struct irq_chip irq_chip; ++ int irq; ++ raw_spinlock_t lock; ++ long unsigned int dual_edge_irqs[5]; ++ long unsigned int enabled_irqs[5]; ++ const struct msm_pinctrl_soc_data *soc; ++ void *regs; ++}; ++ ++struct gpio_desc; ++ ++struct gpio_device { ++ int id; ++ struct device dev; ++ struct cdev chrdev; ++ struct device *mockdev; ++ struct module *owner; ++ struct gpio_chip *chip; ++ struct gpio_desc *descs; ++ int base; ++ u16 ngpio; ++ const char *label; ++ void *data; ++ struct list_head list; ++ struct list_head pin_ranges; ++}; ++ ++struct gpio_descs { ++ unsigned int ndescs; ++ struct gpio_desc *desc[0]; ++}; ++ ++struct gpio_desc { ++ struct gpio_device *gdev; ++ long unsigned int flags; ++ const char *label; ++ const char *name; ++}; ++ ++enum gpiod_flags { ++ GPIOD_ASIS = 0, ++ GPIOD_IN = 1, ++ GPIOD_OUT_LOW = 3, ++ GPIOD_OUT_HIGH = 7, ++ GPIOD_OUT_LOW_OPEN_DRAIN = 11, 
++ GPIOD_OUT_HIGH_OPEN_DRAIN = 15, ++}; ++ ++struct pinctrl_dev___2; ++ ++struct gpio_pin_range { ++ struct list_head node; ++ struct pinctrl_dev___2 *pctldev; ++ struct pinctrl_gpio_range range; ++}; ++ ++enum of_gpio_flags { ++ OF_GPIO_ACTIVE_LOW = 1, ++ OF_GPIO_SINGLE_ENDED = 2, ++ OF_GPIO_OPEN_DRAIN = 4, ++ OF_GPIO_TRANSITORY = 8, ++}; ++ ++enum gpio_lookup_flags { ++ GPIO_ACTIVE_HIGH = 0, ++ GPIO_ACTIVE_LOW = 1, ++ GPIO_OPEN_DRAIN = 2, ++ GPIO_OPEN_SOURCE = 4, ++ GPIO_PERSISTENT = 0, ++ GPIO_TRANSITORY = 8, ++}; ++ ++struct gpiod_lookup { ++ const char *chip_label; ++ u16 chip_hwnum; ++ const char *con_id; ++ unsigned int idx; ++ enum gpio_lookup_flags flags; ++}; ++ ++struct gpiod_lookup_table { ++ struct list_head list; ++ const char *dev_id; ++ struct gpiod_lookup table[0]; ++}; ++ ++struct gpiod_hog { ++ struct list_head list; ++ const char *chip_label; ++ u16 chip_hwnum; ++ const char *line_name; ++ enum gpio_lookup_flags lflags; ++ int dflags; ++}; ++ ++struct gpiochip_info { ++ char name[32]; ++ char label[32]; ++ __u32 lines; ++}; ++ ++struct gpioline_info { ++ __u32 line_offset; ++ __u32 flags; ++ char name[32]; ++ char consumer[32]; ++}; ++ ++struct gpiohandle_request { ++ __u32 lineoffsets[64]; ++ __u32 flags; ++ __u8 default_values[64]; ++ char consumer_label[32]; ++ __u32 lines; ++ int fd; ++}; ++ ++struct gpiohandle_data { ++ __u8 values[64]; ++}; ++ ++struct gpioevent_request { ++ __u32 lineoffset; ++ __u32 handleflags; ++ __u32 eventflags; ++ char consumer_label[32]; ++ int fd; ++}; ++ ++struct gpioevent_data { ++ __u64 timestamp; ++ __u32 id; ++}; ++ ++struct acpi_gpio_info { ++ struct acpi_device *adev; ++ enum gpiod_flags flags; ++ bool gpioint; ++ int polarity; ++ int triggering; ++ unsigned int quirks; ++}; ++ ++struct linehandle_state { ++ struct gpio_device *gdev; ++ const char *label; ++ struct gpio_desc *descs[64]; ++ u32 numdescs; ++}; ++ ++struct lineevent_state { ++ struct gpio_device *gdev; ++ const char *label; ++ struct gpio_desc *desc; ++ u32 eflags; ++ int irq; ++ wait_queue_head_t wait; ++ struct { ++ union { ++ struct __kfifo kfifo; ++ struct gpioevent_data *type; ++ const struct gpioevent_data *const_type; ++ char (*rectype)[0]; ++ struct gpioevent_data *ptr; ++ const struct gpioevent_data *ptr_const; ++ }; ++ struct gpioevent_data buf[16]; ++ } events; ++ struct mutex read_lock; ++ u64 timestamp; ++}; ++ ++struct gpio { ++ unsigned int gpio; ++ long unsigned int flags; ++ const char *label; ++}; ++ ++struct of_mm_gpio_chip { ++ struct gpio_chip gc; ++ void (*save_regs)(struct of_mm_gpio_chip *); ++ void *regs; ++}; ++ ++struct class_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct class *, struct class_attribute *, char *); ++ ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); ++}; ++ ++struct gpiod_data { ++ struct gpio_desc *desc; ++ struct mutex mutex; ++ struct kernfs_node *value_kn; ++ int irq; ++ unsigned char irq_flags; ++ bool direction_can_change; ++}; ++ ++enum dmi_field { ++ DMI_NONE = 0, ++ DMI_BIOS_VENDOR = 1, ++ DMI_BIOS_VERSION = 2, ++ DMI_BIOS_DATE = 3, ++ DMI_SYS_VENDOR = 4, ++ DMI_PRODUCT_NAME = 5, ++ DMI_PRODUCT_VERSION = 6, ++ DMI_PRODUCT_SERIAL = 7, ++ DMI_PRODUCT_UUID = 8, ++ DMI_PRODUCT_SKU = 9, ++ DMI_PRODUCT_FAMILY = 10, ++ DMI_BOARD_VENDOR = 11, ++ DMI_BOARD_NAME = 12, ++ DMI_BOARD_VERSION = 13, ++ DMI_BOARD_SERIAL = 14, ++ DMI_BOARD_ASSET_TAG = 15, ++ DMI_CHASSIS_VENDOR = 16, ++ DMI_CHASSIS_TYPE = 17, ++ DMI_CHASSIS_VERSION = 18, ++ DMI_CHASSIS_SERIAL = 19, ++ 
DMI_CHASSIS_ASSET_TAG = 20, ++ DMI_STRING_MAX = 21, ++ DMI_OEM_STRING = 22, ++}; ++ ++struct dmi_strmatch { ++ unsigned char slot: 7; ++ unsigned char exact_match: 1; ++ char substr[79]; ++}; ++ ++struct dmi_system_id { ++ int (*callback)(const struct dmi_system_id *); ++ const char *ident; ++ struct dmi_strmatch matches[4]; ++ void *driver_data; ++}; ++ ++typedef u64 acpi_physical_address; ++ ++typedef u8 acpi_adr_space_type; ++ ++struct acpi_connection_info { ++ u8 *connection; ++ u16 length; ++ u8 access_length; ++}; ++ ++struct acpi_gpio_event { ++ struct list_head node; ++ acpi_handle handle; ++ irq_handler_t handler; ++ unsigned int pin; ++ unsigned int irq; ++ long unsigned int irqflags; ++ bool irq_is_wake; ++ bool irq_requested; ++ struct gpio_desc *desc; ++}; ++ ++struct acpi_gpio_connection { ++ struct list_head node; ++ unsigned int pin; ++ struct gpio_desc *desc; ++}; ++ ++struct acpi_gpio_chip { ++ struct acpi_connection_info conn_info; ++ struct list_head conns; ++ struct mutex conn_lock; ++ struct gpio_chip *chip; ++ struct list_head events; ++ struct list_head deferred_req_irqs_list_entry; ++}; ++ ++struct acpi_gpio_lookup { ++ struct acpi_gpio_info info; ++ int index; ++ int pin_index; ++ bool active_low; ++ struct gpio_desc *desc; ++ int n; ++}; ++ ++struct bgpio_pdata { ++ const char *label; ++ int base; ++ int ngpio; ++}; ++ ++struct dwapb_port_property { ++ struct fwnode_handle *fwnode; ++ unsigned int idx; ++ unsigned int ngpio; ++ unsigned int gpio_base; ++ int irq[32]; ++ bool has_irq; ++ bool irq_shared; ++}; ++ ++struct dwapb_platform_data { ++ struct dwapb_port_property *properties; ++ unsigned int nports; ++}; ++ ++struct dwapb_context { ++ u32 data; ++ u32 dir; ++ u32 ext; ++ u32 int_en; ++ u32 int_mask; ++ u32 int_type; ++ u32 int_pol; ++ u32 int_deb; ++ u32 wake_en; ++}; ++ ++struct dwapb_gpio; ++ ++struct dwapb_gpio_port { ++ struct gpio_chip gc; ++ bool is_registered; ++ struct dwapb_gpio *gpio; ++ struct dwapb_context *ctx; ++ unsigned int idx; ++}; ++ ++struct reset_control; ++ ++struct dwapb_gpio { ++ struct device *dev; ++ void *regs; ++ struct dwapb_gpio_port *ports; ++ unsigned int nr_ports; ++ struct irq_domain *domain; ++ unsigned int flags; ++ struct reset_control *rst; ++ struct clk *clk; ++}; ++ ++struct amba_id { ++ unsigned int id; ++ unsigned int mask; ++ void *data; ++}; ++ ++struct amba_device { ++ struct device dev; ++ struct resource res; ++ struct clk *pclk; ++ unsigned int periphid; ++ unsigned int irq[9]; ++ char *driver_override; ++}; ++ ++struct amba_driver { ++ struct device_driver drv; ++ int (*probe)(struct amba_device *, const struct amba_id *); ++ int (*remove)(struct amba_device *); ++ void (*shutdown)(struct amba_device *); ++ const struct amba_id *id_table; ++}; ++ ++struct pl061_context_save_regs { ++ u8 gpio_data; ++ u8 gpio_dir; ++ u8 gpio_is; ++ u8 gpio_ibe; ++ u8 gpio_iev; ++ u8 gpio_ie; ++}; ++ ++struct pl061 { ++ raw_spinlock_t lock; ++ void *base; ++ struct gpio_chip gc; ++ struct irq_chip irq_chip; ++ int parent_irq; ++ struct pl061_context_save_regs csave_regs; ++}; ++ ++struct xgene_gpio { ++ struct gpio_chip chip; ++ void *base; ++ spinlock_t lock; ++ u32 set_dr_val[3]; ++}; ++ ++enum pwm_polarity { ++ PWM_POLARITY_NORMAL = 0, ++ PWM_POLARITY_INVERSED = 1, ++}; ++ ++struct pwm_args { ++ unsigned int period; ++ enum pwm_polarity polarity; ++}; ++ ++enum { ++ PWMF_REQUESTED = 1, ++ PWMF_EXPORTED = 2, ++}; ++ ++struct pwm_state { ++ unsigned int period; ++ unsigned int duty_cycle; ++ enum pwm_polarity polarity; ++ 
bool enabled; ++}; ++ ++struct pwm_chip; ++ ++struct pwm_device { ++ const char *label; ++ long unsigned int flags; ++ unsigned int hwpwm; ++ unsigned int pwm; ++ struct pwm_chip *chip; ++ void *chip_data; ++ struct pwm_args args; ++ struct pwm_state state; ++}; ++ ++struct pwm_ops; ++ ++struct pwm_chip { ++ struct device *dev; ++ struct list_head list; ++ const struct pwm_ops *ops; ++ int base; ++ unsigned int npwm; ++ struct pwm_device *pwms; ++ struct pwm_device * (*of_xlate)(struct pwm_chip *, const struct of_phandle_args *); ++ unsigned int of_pwm_n_cells; ++}; ++ ++struct pwm_capture; ++ ++struct pwm_ops { ++ int (*request)(struct pwm_chip *, struct pwm_device *); ++ void (*free)(struct pwm_chip *, struct pwm_device *); ++ int (*config)(struct pwm_chip *, struct pwm_device *, int, int); ++ int (*set_polarity)(struct pwm_chip *, struct pwm_device *, enum pwm_polarity); ++ int (*capture)(struct pwm_chip *, struct pwm_device *, struct pwm_capture *, long unsigned int); ++ int (*enable)(struct pwm_chip *, struct pwm_device *); ++ void (*disable)(struct pwm_chip *, struct pwm_device *); ++ int (*apply)(struct pwm_chip *, struct pwm_device *, struct pwm_state *); ++ void (*get_state)(struct pwm_chip *, struct pwm_device *, struct pwm_state *); ++ void (*dbg_show)(struct pwm_chip *, struct seq_file *); ++ struct module *owner; ++}; ++ ++struct pwm_capture { ++ unsigned int period; ++ unsigned int duty_cycle; ++}; ++ ++struct pwm_lookup { ++ struct list_head list; ++ const char *provider; ++ unsigned int index; ++ const char *dev_id; ++ const char *con_id; ++ unsigned int period; ++ enum pwm_polarity polarity; ++ const char *module; ++}; ++ ++struct pwm_export { ++ struct device child; ++ struct pwm_device *pwm; ++ struct mutex lock; ++}; ++ ++struct pci_sriov { ++ int pos; ++ int nres; ++ u32 cap; ++ u16 ctrl; ++ u16 total_VFs; ++ u16 initial_VFs; ++ u16 num_VFs; ++ u16 offset; ++ u16 stride; ++ u16 vf_device; ++ u32 pgsz; ++ u8 link; ++ u8 max_VF_buses; ++ u16 driver_max_VFs; ++ struct pci_dev *dev; ++ struct pci_dev *self; ++ u32 class; ++ u8 hdr_type; ++ u16 subsystem_vendor; ++ u16 subsystem_device; ++ resource_size_t barsz[6]; ++ bool drivers_autoprobe; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct pci_bus_resource { ++ struct list_head list; ++ struct resource *res; ++ unsigned int flags; ++}; ++ ++typedef u64 pci_bus_addr_t; ++ ++struct pci_bus_region { ++ pci_bus_addr_t start; ++ pci_bus_addr_t end; ++}; ++ ++enum pci_fixup_pass { ++ pci_fixup_early = 0, ++ pci_fixup_header = 1, ++ pci_fixup_final = 2, ++ pci_fixup_enable = 3, ++ pci_fixup_resume = 4, ++ pci_fixup_suspend = 5, ++ pci_fixup_resume_early = 6, ++ pci_fixup_suspend_late = 7, ++}; ++ ++struct hotplug_slot_ops; ++ ++struct hotplug_slot_info; ++ ++struct hotplug_slot { ++ struct hotplug_slot_ops *ops; ++ struct hotplug_slot_info *info; ++ void *private; ++ struct list_head slot_list; ++ struct pci_slot *pci_slot; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++enum pci_dev_flags { ++ 
PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, ++ PCI_DEV_FLAGS_NO_D3 = 2, ++ PCI_DEV_FLAGS_ASSIGNED = 4, ++ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, ++ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, ++ PCI_DEV_FLAGS_NO_BUS_RESET = 64, ++ PCI_DEV_FLAGS_NO_PM_RESET = 128, ++ PCI_DEV_FLAGS_VPD_REF_F0 = 256, ++ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, ++ PCI_DEV_FLAGS_NO_FLR_RESET = 1024, ++ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, ++}; ++ ++enum pci_bus_flags { ++ PCI_BUS_FLAGS_NO_MSI = 1, ++ PCI_BUS_FLAGS_NO_MMRBC = 2, ++ PCI_BUS_FLAGS_NO_AERSID = 4, ++ PCI_BUS_FLAGS_NO_EXTCFG = 8, ++}; ++ ++enum pci_bus_speed { ++ PCI_SPEED_33MHz = 0, ++ PCI_SPEED_66MHz = 1, ++ PCI_SPEED_66MHz_PCIX = 2, ++ PCI_SPEED_100MHz_PCIX = 3, ++ PCI_SPEED_133MHz_PCIX = 4, ++ PCI_SPEED_66MHz_PCIX_ECC = 5, ++ PCI_SPEED_100MHz_PCIX_ECC = 6, ++ PCI_SPEED_133MHz_PCIX_ECC = 7, ++ PCI_SPEED_66MHz_PCIX_266 = 9, ++ PCI_SPEED_100MHz_PCIX_266 = 10, ++ PCI_SPEED_133MHz_PCIX_266 = 11, ++ AGP_UNKNOWN = 12, ++ AGP_1X = 13, ++ AGP_2X = 14, ++ AGP_4X = 15, ++ AGP_8X = 16, ++ PCI_SPEED_66MHz_PCIX_533 = 17, ++ PCI_SPEED_100MHz_PCIX_533 = 18, ++ PCI_SPEED_133MHz_PCIX_533 = 19, ++ PCIE_SPEED_2_5GT = 20, ++ PCIE_SPEED_5_0GT = 21, ++ PCIE_SPEED_8_0GT = 22, ++ PCIE_SPEED_16_0GT = 23, ++ PCI_SPEED_UNKNOWN = 255, ++}; ++ ++enum { ++ PCI_REASSIGN_ALL_RSRC = 1, ++ PCI_REASSIGN_ALL_BUS = 2, ++ PCI_PROBE_ONLY = 4, ++ PCI_CAN_SKIP_ISA_ALIGN = 8, ++ PCI_ENABLE_PROC_DOMAINS = 16, ++ PCI_COMPAT_DOMAIN_0 = 32, ++ PCI_SCAN_ALL_PCIE_DEVS = 64, ++}; ++ ++struct hotplug_slot_ops { ++ struct module *owner; ++ const char *mod_name; ++ int (*enable_slot)(struct hotplug_slot *); ++ int (*disable_slot)(struct hotplug_slot *); ++ int (*set_attention_status)(struct hotplug_slot *, u8); ++ int (*hardware_test)(struct hotplug_slot *, u32); ++ int (*get_power_status)(struct hotplug_slot *, u8 *); ++ int (*get_attention_status)(struct hotplug_slot *, u8 *); ++ int (*get_latch_status)(struct hotplug_slot *, u8 *); ++ int (*get_adapter_status)(struct hotplug_slot *, u8 *); ++ int (*reset_slot)(struct hotplug_slot *, int); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int kabi_reserved7; ++ long unsigned int kabi_reserved8; ++}; ++ ++struct hotplug_slot_info { ++ u8 power_status; ++ u8 attention_status; ++ u8 latch_status; ++ u8 adapter_status; ++}; ++ ++struct hpp_type0 { ++ u32 revision; ++ u8 cache_line_size; ++ u8 latency_timer; ++ u8 enable_serr; ++ u8 enable_perr; ++}; ++ ++struct hpp_type1 { ++ u32 revision; ++ u8 max_mem_read; ++ u8 avg_max_split; ++ u16 tot_max_split; ++}; ++ ++struct hpp_type2 { ++ u32 revision; ++ u32 unc_err_mask_and; ++ u32 unc_err_mask_or; ++ u32 unc_err_sever_and; ++ u32 unc_err_sever_or; ++ u32 cor_err_mask_and; ++ u32 cor_err_mask_or; ++ u32 adv_err_cap_and; ++ u32 adv_err_cap_or; ++ u16 pci_exp_devctl_and; ++ u16 pci_exp_devctl_or; ++ u16 pci_exp_lnkctl_and; ++ u16 pci_exp_lnkctl_or; ++ u32 sec_unc_err_sever_and; ++ u32 sec_unc_err_sever_or; ++ u32 sec_unc_err_mask_and; ++ u32 sec_unc_err_mask_or; ++}; ++ ++struct hotplug_params { ++ struct hpp_type0 *t0; ++ struct hpp_type1 *t1; ++ struct hpp_type2 *t2; ++ struct hpp_type0 type0_data; ++ struct hpp_type1 type1_data; ++ struct hpp_type2 type2_data; ++}; ++ ++enum pci_bar_type { ++ pci_bar_unknown = 0, ++ pci_bar_io = 1, ++ pci_bar_mem32 = 2, ++ pci_bar_mem64 = 3, ++}; ++ ++struct pci_domain_busn_res { ++ struct 
list_head list; ++ struct resource res; ++ int domain_nr; ++}; ++ ++struct skip_bus_num { ++ char module_name[32]; ++ char label[4]; ++ int bus_num; ++ int dev_num; ++ int skip; ++}; ++ ++struct bus_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct bus_type *, char *); ++ ssize_t (*store)(struct bus_type *, const char *, size_t); ++}; ++ ++enum pcie_reset_state { ++ pcie_deassert_reset = 1, ++ pcie_warm_reset = 2, ++ pcie_hot_reset = 3, ++}; ++ ++enum pcie_link_width { ++ PCIE_LNK_WIDTH_RESRV = 0, ++ PCIE_LNK_X1 = 1, ++ PCIE_LNK_X2 = 2, ++ PCIE_LNK_X4 = 4, ++ PCIE_LNK_X8 = 8, ++ PCIE_LNK_X12 = 12, ++ PCIE_LNK_X16 = 16, ++ PCIE_LNK_X32 = 32, ++ PCIE_LNK_WIDTH_UNKNOWN = 255, ++}; ++ ++struct pci_cap_saved_data { ++ u16 cap_nr; ++ bool cap_extended; ++ unsigned int size; ++ u32 data[0]; ++}; ++ ++struct pci_cap_saved_state { ++ struct hlist_node next; ++ struct pci_cap_saved_data cap; ++}; ++ ++typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); ++ ++struct pci_platform_pm_ops { ++ bool (*is_manageable)(struct pci_dev *); ++ int (*set_state)(struct pci_dev *, pci_power_t); ++ pci_power_t (*get_state)(struct pci_dev *); ++ void (*refresh_state)(struct pci_dev *); ++ pci_power_t (*choose_state)(struct pci_dev *); ++ int (*set_wakeup)(struct pci_dev *, bool); ++ bool (*need_resume)(struct pci_dev *); ++}; ++ ++struct pci_pme_device { ++ struct list_head list; ++ struct pci_dev *dev; ++}; ++ ++struct pci_saved_state { ++ u32 config_space[16]; ++ struct pci_cap_saved_data cap[0]; ++}; ++ ++struct pci_devres { ++ unsigned int enabled: 1; ++ unsigned int pinned: 1; ++ unsigned int orig_intx: 1; ++ unsigned int restore_intx: 1; ++ unsigned int mwi: 1; ++ u32 region_mask; ++}; ++ ++struct driver_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct device_driver *, char *); ++ ssize_t (*store)(struct device_driver *, const char *, size_t); ++}; ++ ++enum pci_ers_result { ++ PCI_ERS_RESULT_NONE = 1, ++ PCI_ERS_RESULT_CAN_RECOVER = 2, ++ PCI_ERS_RESULT_NEED_RESET = 3, ++ PCI_ERS_RESULT_DISCONNECT = 4, ++ PCI_ERS_RESULT_RECOVERED = 5, ++ PCI_ERS_RESULT_NO_AER_DRIVER = 6, ++}; ++ ++enum dev_dma_attr { ++ DEV_DMA_NOT_SUPPORTED = 0, ++ DEV_DMA_NON_COHERENT = 1, ++ DEV_DMA_COHERENT = 2, ++}; ++ ++struct pcie_device { ++ int irq; ++ struct pci_dev *port; ++ u32 service; ++ void *priv_data; ++ struct device device; ++}; ++ ++struct pcie_port_service_driver { ++ const char *name; ++ int (*probe)(struct pcie_device *); ++ void (*remove)(struct pcie_device *); ++ int (*suspend)(struct pcie_device *); ++ int (*resume_noirq)(struct pcie_device *); ++ int (*resume)(struct pcie_device *); ++ void (*error_resume)(struct pci_dev *); ++ pci_ers_result_t (*reset_link)(struct pci_dev *); ++ int port_type; ++ u32 service; ++ struct device_driver driver; ++}; ++ ++struct pci_dynid { ++ struct list_head node; ++ struct pci_device_id id; ++}; ++ ++struct drv_dev_and_id { ++ struct pci_driver *drv; ++ struct pci_dev *dev; ++ const struct pci_device_id *id; ++}; ++ ++enum pci_mmap_state { ++ pci_mmap_io = 0, ++ pci_mmap_mem = 1, ++}; ++ ++enum pci_mmap_api { ++ PCI_MMAP_SYSFS = 0, ++ PCI_MMAP_PROCFS = 1, ++}; ++ ++enum pci_lost_interrupt_reason { ++ PCI_LOST_IRQ_NO_INFORMATION = 0, ++ PCI_LOST_IRQ_DISABLE_MSI = 1, ++ PCI_LOST_IRQ_DISABLE_MSIX = 2, ++ PCI_LOST_IRQ_DISABLE_ACPI = 3, ++}; ++ ++struct pci_vpd_ops; ++ ++struct pci_vpd { ++ const struct pci_vpd_ops *ops; ++ struct bin_attribute *attr; ++ struct mutex lock; ++ unsigned int len; ++ u16 flag; ++ u8 cap; ++ unsigned int 
busy: 1; ++ unsigned int valid: 1; ++}; ++ ++struct pci_vpd_ops { ++ ssize_t (*read)(struct pci_dev *, loff_t, size_t, void *); ++ ssize_t (*write)(struct pci_dev *, loff_t, size_t, const void *); ++ int (*set_size)(struct pci_dev *, size_t); ++}; ++ ++struct pci_dev_resource { ++ struct list_head list; ++ struct resource *res; ++ struct pci_dev *dev; ++ resource_size_t start; ++ resource_size_t end; ++ resource_size_t add_size; ++ resource_size_t min_align; ++ long unsigned int flags; ++}; ++ ++enum release_type { ++ leaf_only = 0, ++ whole_subtree = 1, ++}; ++ ++enum enable_type { ++ undefined = 4294967295, ++ user_disabled = 0, ++ auto_disabled = 1, ++ user_enabled = 2, ++ auto_enabled = 3, ++}; ++ ++struct pci_slot_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct pci_slot *, char *); ++ ssize_t (*store)(struct pci_slot *, const char *, size_t); ++}; ++ ++struct of_pci_range_parser { ++ struct device_node *node; ++ const __be32 *range; ++ const __be32 *end; ++ int np; ++ int pna; ++}; ++ ++struct of_pci_range { ++ u32 pci_space; ++ u64 pci_addr; ++ u64 cpu_addr; ++ u64 size; ++ u32 flags; ++}; ++ ++struct pci_fixup { ++ u16 vendor; ++ u16 device; ++ u32 class; ++ unsigned int class_shift; ++ int hook_offset; ++}; ++ ++enum { ++ NVME_REG_CAP = 0, ++ NVME_REG_VS = 8, ++ NVME_REG_INTMS = 12, ++ NVME_REG_INTMC = 16, ++ NVME_REG_CC = 20, ++ NVME_REG_CSTS = 28, ++ NVME_REG_NSSR = 32, ++ NVME_REG_AQA = 36, ++ NVME_REG_ASQ = 40, ++ NVME_REG_ACQ = 48, ++ NVME_REG_CMBLOC = 56, ++ NVME_REG_CMBSZ = 60, ++ NVME_REG_DBS = 4096, ++}; ++ ++enum { ++ NVME_CC_ENABLE = 1, ++ NVME_CC_CSS_NVM = 0, ++ NVME_CC_EN_SHIFT = 0, ++ NVME_CC_CSS_SHIFT = 4, ++ NVME_CC_MPS_SHIFT = 7, ++ NVME_CC_AMS_SHIFT = 11, ++ NVME_CC_SHN_SHIFT = 14, ++ NVME_CC_IOSQES_SHIFT = 16, ++ NVME_CC_IOCQES_SHIFT = 20, ++ NVME_CC_AMS_RR = 0, ++ NVME_CC_AMS_WRRU = 2048, ++ NVME_CC_AMS_VS = 14336, ++ NVME_CC_SHN_NONE = 0, ++ NVME_CC_SHN_NORMAL = 16384, ++ NVME_CC_SHN_ABRUPT = 32768, ++ NVME_CC_SHN_MASK = 49152, ++ NVME_CC_IOSQES = 393216, ++ NVME_CC_IOCQES = 4194304, ++ NVME_CSTS_RDY = 1, ++ NVME_CSTS_CFS = 2, ++ NVME_CSTS_NSSRO = 16, ++ NVME_CSTS_PP = 32, ++ NVME_CSTS_SHST_NORMAL = 0, ++ NVME_CSTS_SHST_OCCUR = 4, ++ NVME_CSTS_SHST_CMPLT = 8, ++ NVME_CSTS_SHST_MASK = 12, ++}; ++ ++enum { ++ SWITCHTEC_GAS_MRPC_OFFSET = 0, ++ SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, ++ SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, ++ SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, ++ SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, ++ SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, ++ SWITCHTEC_GAS_NTB_OFFSET = 65536, ++ SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, ++}; ++ ++struct sys_info_regs { ++ u32 device_id; ++ u32 device_version; ++ u32 firmware_version; ++ u32 reserved1; ++ u32 vendor_table_revision; ++ u32 table_format_version; ++ u32 partition_id; ++ u32 cfg_file_fmt_version; ++ u16 cfg_running; ++ u16 img_running; ++ u32 reserved2[57]; ++ char vendor_id[8]; ++ char product_id[16]; ++ char product_revision[4]; ++ char component_vendor[8]; ++ u16 component_id; ++ u8 component_revision; ++} __attribute__((packed)); ++ ++enum { ++ SWITCHTEC_NTB_REG_INFO_OFFSET = 0, ++ SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, ++ SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, ++}; ++ ++struct nt_partition_info { ++ u32 xlink_enabled; ++ u32 target_part_low; ++ u32 target_part_high; ++ u32 reserved; ++}; ++ ++struct ntb_info_regs { ++ u8 partition_count; ++ u8 partition_id; ++ u16 reserved1; ++ u64 ep_map; ++ u16 requester_id; ++ u16 reserved2; ++ u32 reserved3[4]; ++ struct nt_partition_info ntp_info[48]; ++} 
__attribute__((packed)); ++ ++struct ntb_ctrl_regs { ++ u32 partition_status; ++ u32 partition_op; ++ u32 partition_ctrl; ++ u32 bar_setup; ++ u32 bar_error; ++ u16 lut_table_entries; ++ u16 lut_table_offset; ++ u32 lut_error; ++ u16 req_id_table_size; ++ u16 req_id_table_offset; ++ u32 req_id_error; ++ u32 reserved1[7]; ++ struct { ++ u32 ctl; ++ u32 win_size; ++ u64 xlate_addr; ++ } bar_entry[6]; ++ u32 reserved2[216]; ++ u32 req_id_table[256]; ++ u32 reserved3[512]; ++ u64 lut_entry[512]; ++}; ++ ++struct pci_dev_reset_methods { ++ u16 vendor; ++ u16 device; ++ int (*reset)(struct pci_dev *, int); ++}; ++ ++struct pci_dev_acs_enabled { ++ u16 vendor; ++ u16 device; ++ int (*acs_enabled)(struct pci_dev *, u16); ++}; ++ ++struct pci_dev_acs_ops { ++ u16 vendor; ++ u16 device; ++ int (*enable_acs)(struct pci_dev *); ++ int (*disable_acs_redir)(struct pci_dev *); ++}; ++ ++struct portdrv_service_data { ++ struct pcie_port_service_driver *drv; ++ struct device *dev; ++ u32 service; ++}; ++ ++typedef int (*pcie_pm_callback_t)(struct pcie_device *); ++ ++struct aer_broadcast_data { ++ enum pci_channel_state state; ++ enum pci_ers_result result; ++}; ++ ++struct aspm_latency { ++ u32 l0s; ++ u32 l1; ++}; ++ ++struct pcie_link_state { ++ struct pci_dev *pdev; ++ struct pci_dev *downstream; ++ struct pcie_link_state *root; ++ struct pcie_link_state *parent; ++ struct list_head sibling; ++ struct list_head children; ++ struct list_head link; ++ u32 aspm_support: 7; ++ u32 aspm_enabled: 7; ++ u32 aspm_capable: 7; ++ u32 aspm_default: 7; ++ char: 4; ++ u32 aspm_disable: 7; ++ u32 clkpm_capable: 1; ++ u32 clkpm_enabled: 1; ++ u32 clkpm_default: 1; ++ struct aspm_latency latency_up; ++ struct aspm_latency latency_dw; ++ struct aspm_latency acceptable[8]; ++ struct { ++ u32 up_cap_ptr; ++ u32 dw_cap_ptr; ++ u32 ctl1; ++ u32 ctl2; ++ } l1ss; ++}; ++ ++struct aspm_register_info { ++ u32 support: 2; ++ u32 enabled: 2; ++ u32 latency_encoding_l0s; ++ u32 latency_encoding_l1; ++ u32 l1ss_cap_ptr; ++ u32 l1ss_cap; ++ u32 l1ss_ctl1; ++ u32 l1ss_ctl2; ++}; ++ ++struct aer_stats { ++ u64 dev_cor_errs[16]; ++ u64 dev_fatal_errs[26]; ++ u64 dev_nonfatal_errs[26]; ++ u64 dev_total_cor_errs; ++ u64 dev_total_fatal_errs; ++ u64 dev_total_nonfatal_errs; ++ u64 rootport_total_cor_errs; ++ u64 rootport_total_fatal_errs; ++ u64 rootport_total_nonfatal_errs; ++}; ++ ++enum acpi_hest_types { ++ ACPI_HEST_TYPE_IA32_CHECK = 0, ++ ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, ++ ACPI_HEST_TYPE_IA32_NMI = 2, ++ ACPI_HEST_TYPE_NOT_USED3 = 3, ++ ACPI_HEST_TYPE_NOT_USED4 = 4, ++ ACPI_HEST_TYPE_NOT_USED5 = 5, ++ ACPI_HEST_TYPE_AER_ROOT_PORT = 6, ++ ACPI_HEST_TYPE_AER_ENDPOINT = 7, ++ ACPI_HEST_TYPE_AER_BRIDGE = 8, ++ ACPI_HEST_TYPE_GENERIC_ERROR = 9, ++ ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, ++ ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, ++ ACPI_HEST_TYPE_RESERVED = 12, ++}; ++ ++struct acpi_hest_aer_common { ++ u16 reserved1; ++ u8 flags; ++ u8 enabled; ++ u32 records_to_preallocate; ++ u32 max_sections_per_record; ++ u32 bus; ++ u16 device; ++ u16 function; ++ u16 device_control; ++ u16 reserved2; ++ u32 uncorrectable_mask; ++ u32 uncorrectable_severity; ++ u32 correctable_mask; ++ u32 advanced_capabilities; ++}; ++ ++struct aer_header_log_regs { ++ unsigned int dw0; ++ unsigned int dw1; ++ unsigned int dw2; ++ unsigned int dw3; ++}; ++ ++struct aer_capability_regs { ++ u32 header; ++ u32 uncor_status; ++ u32 uncor_mask; ++ u32 uncor_severity; ++ u32 cor_status; ++ u32 cor_mask; ++ u32 cap_control; ++ struct aer_header_log_regs 
header_log; ++ u32 root_command; ++ u32 root_status; ++ u16 cor_err_source; ++ u16 uncor_err_source; ++}; ++ ++struct aer_err_info { ++ struct pci_dev *dev[5]; ++ int error_dev_num; ++ unsigned int id: 16; ++ unsigned int severity: 2; ++ unsigned int __pad1: 5; ++ unsigned int multi_error_valid: 1; ++ unsigned int first_error: 5; ++ unsigned int __pad2: 2; ++ unsigned int tlp_header_valid: 1; ++ unsigned int status; ++ unsigned int mask; ++ struct aer_header_log_regs tlp; ++}; ++ ++struct aer_err_source { ++ unsigned int status; ++ unsigned int id; ++}; ++ ++struct aer_rpc { ++ struct pci_dev *rpd; ++ struct work_struct dpc_handler; ++ struct aer_err_source e_sources[100]; ++ struct aer_err_info e_info; ++ short unsigned int prod_idx; ++ short unsigned int cons_idx; ++ int isr; ++ spinlock_t e_lock; ++ struct mutex rpc_mutex; ++}; ++ ++struct aer_hest_parse_info { ++ struct pci_dev *pci_dev; ++ int firmware_first; ++}; ++ ++struct aer_recover_entry { ++ u8 bus; ++ u8 devfn; ++ u16 domain; ++ int severity; ++ struct aer_capability_regs *regs; ++}; ++ ++struct pcie_pme_service_data { ++ spinlock_t lock; ++ struct pcie_device *srv; ++ struct work_struct work; ++ bool noirq; ++}; ++ ++struct dpc_dev { ++ struct pcie_device *dev; ++ u16 cap_pos; ++ bool rp_extensions; ++ u8 rp_log_size; ++}; ++ ++struct controller; ++ ++struct slot { ++ u8 state; ++ struct controller *ctrl; ++ struct hotplug_slot *hotplug_slot; ++ struct delayed_work work; ++ struct mutex lock; ++}; ++ ++struct controller { ++ struct mutex ctrl_lock; ++ struct pcie_device *pcie; ++ struct rw_semaphore reset_lock; ++ struct slot *slot; ++ wait_queue_head_t queue; ++ u32 slot_cap; ++ u16 slot_ctrl; ++ struct task_struct *poll_thread; ++ long unsigned int cmd_started; ++ unsigned int cmd_busy: 1; ++ unsigned int link_active_reporting: 1; ++ unsigned int notification_enabled: 1; ++ unsigned int power_fault_detected; ++ atomic_t pending_events; ++ int request_result; ++ wait_queue_head_t requester; ++}; ++ ++struct controller___2; ++ ++struct hpc_ops; ++ ++struct slot___2 { ++ u8 bus; ++ u8 device; ++ u16 status; ++ u32 number; ++ u8 is_a_board; ++ u8 state; ++ u8 presence_save; ++ u8 pwr_save; ++ struct controller___2 *ctrl; ++ const struct hpc_ops *hpc_ops; ++ struct hotplug_slot *hotplug_slot; ++ struct list_head slot_list; ++ struct delayed_work work; ++ struct mutex lock; ++ struct workqueue_struct *wq; ++ u8 hp_slot; ++}; ++ ++struct controller___2 { ++ struct mutex crit_sect; ++ struct mutex cmd_lock; ++ int num_slots; ++ int slot_num_inc; ++ struct pci_dev *pci_dev; ++ struct list_head slot_list; ++ const struct hpc_ops *hpc_ops; ++ wait_queue_head_t queue; ++ u8 slot_device_offset; ++ u32 pcix_misc2_reg; ++ u32 first_slot; ++ u32 cap_offset; ++ long unsigned int mmio_base; ++ long unsigned int mmio_size; ++ void *creg; ++ struct timer_list poll_timer; ++}; ++ ++struct hpc_ops { ++ int (*power_on_slot)(struct slot___2 *); ++ int (*slot_enable)(struct slot___2 *); ++ int (*slot_disable)(struct slot___2 *); ++ int (*set_bus_speed_mode)(struct slot___2 *, enum pci_bus_speed); ++ int (*get_power_status)(struct slot___2 *, u8 *); ++ int (*get_attention_status)(struct slot___2 *, u8 *); ++ int (*set_attention_status)(struct slot___2 *, u8); ++ int (*get_latch_status)(struct slot___2 *, u8 *); ++ int (*get_adapter_status)(struct slot___2 *, u8 *); ++ int (*get_adapter_speed)(struct slot___2 *, enum pci_bus_speed *); ++ int (*get_mode1_ECC_cap)(struct slot___2 *, u8 *); ++ int (*get_prog_int)(struct slot___2 *, u8 *); ++ int 
(*query_power_fault)(struct slot___2 *); ++ void (*green_led_on)(struct slot___2 *); ++ void (*green_led_off)(struct slot___2 *); ++ void (*green_led_blink)(struct slot___2 *); ++ void (*release_ctlr)(struct controller___2 *); ++ int (*check_cmd_status)(struct controller___2 *); ++}; ++ ++struct event_info { ++ u32 event_type; ++ struct slot___2 *p_slot; ++ struct work_struct work; ++}; ++ ++struct pushbutton_work_info { ++ struct slot___2 *p_slot; ++ struct work_struct work; ++}; ++ ++enum ctrl_offsets { ++ BASE_OFFSET = 0, ++ SLOT_AVAIL1 = 4, ++ SLOT_AVAIL2 = 8, ++ SLOT_CONFIG = 12, ++ SEC_BUS_CONFIG = 16, ++ MSI_CTRL = 18, ++ PROG_INTERFACE = 19, ++ CMD = 20, ++ CMD_STATUS = 22, ++ INTR_LOC = 24, ++ SERR_LOC = 28, ++ SERR_INTR_ENABLE = 32, ++ SLOT1 = 36, ++}; ++ ++struct acpiphp_slot; ++ ++struct slot___3 { ++ struct hotplug_slot *hotplug_slot; ++ struct acpiphp_slot *acpi_slot; ++ struct hotplug_slot_info info; ++ unsigned int sun; ++}; ++ ++struct acpiphp_slot { ++ struct list_head node; ++ struct pci_bus *bus; ++ struct list_head funcs; ++ struct slot___3 *slot; ++ u8 device; ++ u32 flags; ++}; ++ ++struct acpiphp_attention_info { ++ int (*set_attn)(struct hotplug_slot *, u8); ++ int (*get_attn)(struct hotplug_slot *, u8 *); ++ struct module *owner; ++}; ++ ++struct acpi_object_list { ++ u32 count; ++ union acpi_object *pointer; ++}; ++ ++struct acpiphp_context; ++ ++struct acpiphp_bridge { ++ struct list_head list; ++ struct list_head slots; ++ struct kref ref; ++ struct acpiphp_context *context; ++ int nr_slots; ++ struct pci_bus *pci_bus; ++ struct pci_dev *pci_dev; ++ bool is_going_away; ++}; ++ ++struct acpiphp_func { ++ struct acpiphp_bridge *parent; ++ struct acpiphp_slot *slot; ++ struct list_head sibling; ++ u8 function; ++ u32 flags; ++}; ++ ++struct acpiphp_context { ++ struct acpi_hotplug_context hp; ++ struct acpiphp_func func; ++ struct acpiphp_bridge *bridge; ++ unsigned int refcount; ++}; ++ ++struct acpiphp_root_context { ++ struct acpi_hotplug_context hp; ++ struct acpiphp_bridge *root_bridge; ++}; ++ ++struct msix_entry { ++ u32 vector; ++ u16 entry; ++}; ++ ++struct acpi_bus_type { ++ struct list_head list; ++ const char *name; ++ bool (*match)(struct device *); ++ struct acpi_device * (*find_companion)(struct device *); ++ void (*setup)(struct device *); ++ void (*cleanup)(struct device *); ++}; ++ ++enum pm_qos_flags_status { ++ PM_QOS_FLAGS_UNDEFINED = 4294967295, ++ PM_QOS_FLAGS_NONE = 0, ++ PM_QOS_FLAGS_SOME = 1, ++ PM_QOS_FLAGS_ALL = 2, ++}; ++ ++enum dmi_device_type { ++ DMI_DEV_TYPE_ANY = 0, ++ DMI_DEV_TYPE_OTHER = 1, ++ DMI_DEV_TYPE_UNKNOWN = 2, ++ DMI_DEV_TYPE_VIDEO = 3, ++ DMI_DEV_TYPE_SCSI = 4, ++ DMI_DEV_TYPE_ETHERNET = 5, ++ DMI_DEV_TYPE_TOKENRING = 6, ++ DMI_DEV_TYPE_SOUND = 7, ++ DMI_DEV_TYPE_PATA = 8, ++ DMI_DEV_TYPE_SATA = 9, ++ DMI_DEV_TYPE_SAS = 10, ++ DMI_DEV_TYPE_IPMI = 4294967295, ++ DMI_DEV_TYPE_OEM_STRING = 4294967294, ++ DMI_DEV_TYPE_DEV_ONBOARD = 4294967293, ++ DMI_DEV_TYPE_DEV_SLOT = 4294967292, ++}; ++ ++struct dmi_device { ++ struct list_head list; ++ int type; ++ const char *name; ++ void *device_data; ++}; ++ ++struct dmi_dev_onboard { ++ struct dmi_device dev; ++ int instance; ++ int segment; ++ int bus; ++ int devfn; ++}; ++ ++enum smbios_attr_enum { ++ SMBIOS_ATTR_NONE = 0, ++ SMBIOS_ATTR_LABEL_SHOW = 1, ++ SMBIOS_ATTR_INSTANCE_SHOW = 2, ++}; ++ ++enum acpi_attr_enum { ++ ACPI_ATTR_LABEL_SHOW = 0, ++ ACPI_ATTR_INDEX_SHOW = 1, ++}; ++ ++struct xgene_msi; ++ ++struct xgene_msi_group { ++ struct xgene_msi *msi; ++ int gic_irq; ++ 
u32 msi_grp; ++}; ++ ++struct xgene_msi { ++ struct device_node *node; ++ struct irq_domain *inner_domain; ++ struct irq_domain *msi_domain; ++ u64 msi_addr; ++ void *msi_regs; ++ long unsigned int *bitmap; ++ struct mutex bitmap_lock; ++ struct xgene_msi_group *msi_groups; ++ int num_cpus; ++}; ++ ++enum pci_interrupt_pin { ++ PCI_INTERRUPT_UNKNOWN = 0, ++ PCI_INTERRUPT_INTA = 1, ++ PCI_INTERRUPT_INTB = 2, ++ PCI_INTERRUPT_INTC = 3, ++ PCI_INTERRUPT_INTD = 4, ++}; ++ ++enum pci_barno { ++ BAR_0 = 0, ++ BAR_1 = 1, ++ BAR_2 = 2, ++ BAR_3 = 3, ++ BAR_4 = 4, ++ BAR_5 = 5, ++}; ++ ++struct pci_epf_header { ++ u16 vendorid; ++ u16 deviceid; ++ u8 revid; ++ u8 progif_code; ++ u8 subclass_code; ++ u8 baseclass_code; ++ u8 cache_line_size; ++ u16 subsys_vendor_id; ++ u16 subsys_id; ++ enum pci_interrupt_pin interrupt_pin; ++}; ++ ++struct pci_epf_bar { ++ dma_addr_t phys_addr; ++ size_t size; ++ enum pci_barno barno; ++ int flags; ++}; ++ ++struct config_group___2; ++ ++struct pci_epc_ops; ++ ++struct pci_epc_mem; ++ ++struct pci_epc { ++ struct device dev; ++ struct list_head pci_epf; ++ const struct pci_epc_ops *ops; ++ struct pci_epc_mem *mem; ++ u8 max_functions; ++ struct config_group___2 *group; ++ spinlock_t lock; ++ unsigned int features; ++}; ++ ++enum pci_epc_irq_type { ++ PCI_EPC_IRQ_UNKNOWN = 0, ++ PCI_EPC_IRQ_LEGACY = 1, ++ PCI_EPC_IRQ_MSI = 2, ++ PCI_EPC_IRQ_MSIX = 3, ++}; ++ ++struct pci_epc_ops { ++ int (*write_header)(struct pci_epc *, u8, struct pci_epf_header *); ++ int (*set_bar)(struct pci_epc *, u8, struct pci_epf_bar *); ++ void (*clear_bar)(struct pci_epc *, u8, struct pci_epf_bar *); ++ int (*map_addr)(struct pci_epc *, u8, phys_addr_t, u64, size_t); ++ void (*unmap_addr)(struct pci_epc *, u8, phys_addr_t); ++ int (*set_msi)(struct pci_epc *, u8, u8); ++ int (*get_msi)(struct pci_epc *, u8); ++ int (*set_msix)(struct pci_epc *, u8, u16); ++ int (*get_msix)(struct pci_epc *, u8); ++ int (*raise_irq)(struct pci_epc *, u8, enum pci_epc_irq_type, u16); ++ int (*start)(struct pci_epc *); ++ void (*stop)(struct pci_epc *); ++ struct module *owner; ++}; ++ ++struct pci_epc_mem { ++ phys_addr_t phys_base; ++ size_t size; ++ long unsigned int *bitmap; ++ size_t page_size; ++ int pages; ++}; ++ ++enum dw_pcie_region_type { ++ DW_PCIE_REGION_UNKNOWN = 0, ++ DW_PCIE_REGION_INBOUND = 1, ++ DW_PCIE_REGION_OUTBOUND = 2, ++}; ++ ++struct pcie_port; ++ ++struct dw_pcie_host_ops { ++ int (*rd_own_conf)(struct pcie_port *, int, int, u32 *); ++ int (*wr_own_conf)(struct pcie_port *, int, int, u32); ++ int (*rd_other_conf)(struct pcie_port *, struct pci_bus *, unsigned int, int, int, u32 *); ++ int (*wr_other_conf)(struct pcie_port *, struct pci_bus *, unsigned int, int, int, u32); ++ int (*host_init)(struct pcie_port *); ++ void (*msi_set_irq)(struct pcie_port *, int); ++ void (*msi_clear_irq)(struct pcie_port *, int); ++ phys_addr_t (*get_msi_addr)(struct pcie_port *); ++ u32 (*get_msi_data)(struct pcie_port *, int); ++ void (*scan_bus)(struct pcie_port *); ++ void (*set_num_vectors)(struct pcie_port *); ++ int (*msi_host_init)(struct pcie_port *); ++ void (*msi_irq_ack)(int, struct pcie_port *); ++}; ++ ++struct pcie_port { ++ u8 root_bus_nr; ++ u64 cfg0_base; ++ void *va_cfg0_base; ++ u32 cfg0_size; ++ u64 cfg1_base; ++ void *va_cfg1_base; ++ u32 cfg1_size; ++ resource_size_t io_base; ++ phys_addr_t io_bus_addr; ++ u32 io_size; ++ u64 mem_base; ++ phys_addr_t mem_bus_addr; ++ u32 mem_size; ++ struct resource *cfg; ++ struct resource *io; ++ struct resource *mem; ++ struct resource *busn; 
++ int irq; ++ const struct dw_pcie_host_ops *ops; ++ int msi_irq; ++ struct irq_domain *irq_domain; ++ struct irq_domain *msi_domain; ++ dma_addr_t msi_data; ++ struct page *msi_page; ++ u32 num_vectors; ++ u32 irq_status[8]; ++ raw_spinlock_t lock; ++ long unsigned int msi_irq_in_use[4]; ++}; ++ ++enum dw_pcie_as_type { ++ DW_PCIE_AS_UNKNOWN = 0, ++ DW_PCIE_AS_MEM = 1, ++ DW_PCIE_AS_IO = 2, ++}; ++ ++struct dw_pcie_ep; ++ ++struct dw_pcie_ep_ops { ++ void (*ep_init)(struct dw_pcie_ep *); ++ int (*raise_irq)(struct dw_pcie_ep *, u8, enum pci_epc_irq_type, u16); ++}; ++ ++struct dw_pcie_ep { ++ struct pci_epc *epc; ++ struct dw_pcie_ep_ops *ops; ++ phys_addr_t phys_base; ++ size_t addr_size; ++ size_t page_size; ++ u8 bar_to_atu[6]; ++ phys_addr_t *outbound_addr; ++ long unsigned int *ib_window_map; ++ long unsigned int *ob_window_map; ++ u32 num_ib_windows; ++ u32 num_ob_windows; ++ void *msi_mem; ++ phys_addr_t msi_mem_phys; ++ u8 msi_cap; ++ u8 msix_cap; ++}; ++ ++struct dw_pcie; ++ ++struct dw_pcie_ops { ++ u64 (*cpu_addr_fixup)(struct dw_pcie *, u64); ++ u32 (*read_dbi)(struct dw_pcie *, void *, u32, size_t); ++ void (*write_dbi)(struct dw_pcie *, void *, u32, size_t, u32); ++ int (*link_up)(struct dw_pcie *); ++ int (*start_link)(struct dw_pcie *); ++ void (*stop_link)(struct dw_pcie *); ++}; ++ ++struct dw_pcie { ++ struct device *dev; ++ void *dbi_base; ++ void *dbi_base2; ++ u32 num_viewport; ++ u8 iatu_unroll_enabled; ++ struct pcie_port pp; ++ struct dw_pcie_ep ep; ++ const struct dw_pcie_ops *ops; ++}; ++ ++struct hisi_pcie; ++ ++struct pcie_soc_ops { ++ int (*hisi_pcie_link_up)(struct hisi_pcie *); ++}; ++ ++struct hisi_pcie { ++ struct dw_pcie *pci; ++ struct regmap *subctrl; ++ u32 port_id; ++ const struct pcie_soc_ops *soc_ops; ++}; ++ ++struct thunder_pem_pci { ++ u32 ea_entry[3]; ++ void *pem_reg_base; ++}; ++ ++struct xgene_pcie_port { ++ struct device_node *node; ++ struct device *dev; ++ struct clk *clk; ++ void *csr_base; ++ void *cfg_base; ++ long unsigned int cfg_addr; ++ bool link_up; ++ u32 version; ++}; ++ ++enum hdmi_infoframe_type { ++ HDMI_INFOFRAME_TYPE_VENDOR = 129, ++ HDMI_INFOFRAME_TYPE_AVI = 130, ++ HDMI_INFOFRAME_TYPE_SPD = 131, ++ HDMI_INFOFRAME_TYPE_AUDIO = 132, ++}; ++ ++struct hdmi_any_infoframe { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++}; ++ ++enum hdmi_colorspace { ++ HDMI_COLORSPACE_RGB = 0, ++ HDMI_COLORSPACE_YUV422 = 1, ++ HDMI_COLORSPACE_YUV444 = 2, ++ HDMI_COLORSPACE_YUV420 = 3, ++ HDMI_COLORSPACE_RESERVED4 = 4, ++ HDMI_COLORSPACE_RESERVED5 = 5, ++ HDMI_COLORSPACE_RESERVED6 = 6, ++ HDMI_COLORSPACE_IDO_DEFINED = 7, ++}; ++ ++enum hdmi_scan_mode { ++ HDMI_SCAN_MODE_NONE = 0, ++ HDMI_SCAN_MODE_OVERSCAN = 1, ++ HDMI_SCAN_MODE_UNDERSCAN = 2, ++ HDMI_SCAN_MODE_RESERVED = 3, ++}; ++ ++enum hdmi_colorimetry { ++ HDMI_COLORIMETRY_NONE = 0, ++ HDMI_COLORIMETRY_ITU_601 = 1, ++ HDMI_COLORIMETRY_ITU_709 = 2, ++ HDMI_COLORIMETRY_EXTENDED = 3, ++}; ++ ++enum hdmi_picture_aspect { ++ HDMI_PICTURE_ASPECT_NONE = 0, ++ HDMI_PICTURE_ASPECT_4_3 = 1, ++ HDMI_PICTURE_ASPECT_16_9 = 2, ++ HDMI_PICTURE_ASPECT_64_27 = 3, ++ HDMI_PICTURE_ASPECT_256_135 = 4, ++ HDMI_PICTURE_ASPECT_RESERVED = 5, ++}; ++ ++enum hdmi_active_aspect { ++ HDMI_ACTIVE_ASPECT_16_9_TOP = 2, ++ HDMI_ACTIVE_ASPECT_14_9_TOP = 3, ++ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, ++ HDMI_ACTIVE_ASPECT_PICTURE = 8, ++ HDMI_ACTIVE_ASPECT_4_3 = 9, ++ HDMI_ACTIVE_ASPECT_16_9 = 10, ++ HDMI_ACTIVE_ASPECT_14_9 = 11, ++ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, ++ 
HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, ++ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, ++}; ++ ++enum hdmi_extended_colorimetry { ++ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, ++ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, ++ HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, ++ HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3, ++ HDMI_EXTENDED_COLORIMETRY_OPRGB = 4, ++ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, ++ HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, ++ HDMI_EXTENDED_COLORIMETRY_RESERVED = 7, ++}; ++ ++enum hdmi_quantization_range { ++ HDMI_QUANTIZATION_RANGE_DEFAULT = 0, ++ HDMI_QUANTIZATION_RANGE_LIMITED = 1, ++ HDMI_QUANTIZATION_RANGE_FULL = 2, ++ HDMI_QUANTIZATION_RANGE_RESERVED = 3, ++}; ++ ++enum hdmi_nups { ++ HDMI_NUPS_UNKNOWN = 0, ++ HDMI_NUPS_HORIZONTAL = 1, ++ HDMI_NUPS_VERTICAL = 2, ++ HDMI_NUPS_BOTH = 3, ++}; ++ ++enum hdmi_ycc_quantization_range { ++ HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, ++ HDMI_YCC_QUANTIZATION_RANGE_FULL = 1, ++}; ++ ++enum hdmi_content_type { ++ HDMI_CONTENT_TYPE_GRAPHICS = 0, ++ HDMI_CONTENT_TYPE_PHOTO = 1, ++ HDMI_CONTENT_TYPE_CINEMA = 2, ++ HDMI_CONTENT_TYPE_GAME = 3, ++}; ++ ++struct hdmi_avi_infoframe { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++ enum hdmi_colorspace colorspace; ++ enum hdmi_scan_mode scan_mode; ++ enum hdmi_colorimetry colorimetry; ++ enum hdmi_picture_aspect picture_aspect; ++ enum hdmi_active_aspect active_aspect; ++ bool itc; ++ enum hdmi_extended_colorimetry extended_colorimetry; ++ enum hdmi_quantization_range quantization_range; ++ enum hdmi_nups nups; ++ unsigned char video_code; ++ enum hdmi_ycc_quantization_range ycc_quantization_range; ++ enum hdmi_content_type content_type; ++ unsigned char pixel_repeat; ++ short unsigned int top_bar; ++ short unsigned int bottom_bar; ++ short unsigned int left_bar; ++ short unsigned int right_bar; ++}; ++ ++enum hdmi_spd_sdi { ++ HDMI_SPD_SDI_UNKNOWN = 0, ++ HDMI_SPD_SDI_DSTB = 1, ++ HDMI_SPD_SDI_DVDP = 2, ++ HDMI_SPD_SDI_DVHS = 3, ++ HDMI_SPD_SDI_HDDVR = 4, ++ HDMI_SPD_SDI_DVC = 5, ++ HDMI_SPD_SDI_DSC = 6, ++ HDMI_SPD_SDI_VCD = 7, ++ HDMI_SPD_SDI_GAME = 8, ++ HDMI_SPD_SDI_PC = 9, ++ HDMI_SPD_SDI_BD = 10, ++ HDMI_SPD_SDI_SACD = 11, ++ HDMI_SPD_SDI_HDDVD = 12, ++ HDMI_SPD_SDI_PMP = 13, ++}; ++ ++struct hdmi_spd_infoframe { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++ char vendor[8]; ++ char product[16]; ++ enum hdmi_spd_sdi sdi; ++}; ++ ++enum hdmi_audio_coding_type { ++ HDMI_AUDIO_CODING_TYPE_STREAM = 0, ++ HDMI_AUDIO_CODING_TYPE_PCM = 1, ++ HDMI_AUDIO_CODING_TYPE_AC3 = 2, ++ HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, ++ HDMI_AUDIO_CODING_TYPE_MP3 = 4, ++ HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, ++ HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, ++ HDMI_AUDIO_CODING_TYPE_DTS = 7, ++ HDMI_AUDIO_CODING_TYPE_ATRAC = 8, ++ HDMI_AUDIO_CODING_TYPE_DSD = 9, ++ HDMI_AUDIO_CODING_TYPE_EAC3 = 10, ++ HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, ++ HDMI_AUDIO_CODING_TYPE_MLP = 12, ++ HDMI_AUDIO_CODING_TYPE_DST = 13, ++ HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, ++ HDMI_AUDIO_CODING_TYPE_CXT = 15, ++}; ++ ++enum hdmi_audio_sample_size { ++ HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, ++ HDMI_AUDIO_SAMPLE_SIZE_16 = 1, ++ HDMI_AUDIO_SAMPLE_SIZE_20 = 2, ++ HDMI_AUDIO_SAMPLE_SIZE_24 = 3, ++}; ++ ++enum hdmi_audio_sample_frequency { ++ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, ++ 
HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, ++ HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7, ++}; ++ ++enum hdmi_audio_coding_type_ext { ++ HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, ++ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, ++ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, ++ HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, ++ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, ++}; ++ ++struct hdmi_audio_infoframe { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++ unsigned char channels; ++ enum hdmi_audio_coding_type coding_type; ++ enum hdmi_audio_sample_size sample_size; ++ enum hdmi_audio_sample_frequency sample_frequency; ++ enum hdmi_audio_coding_type_ext coding_type_ext; ++ unsigned char channel_allocation; ++ unsigned char level_shift_value; ++ bool downmix_inhibit; ++}; ++ ++enum hdmi_3d_structure { ++ HDMI_3D_STRUCTURE_INVALID = 4294967295, ++ HDMI_3D_STRUCTURE_FRAME_PACKING = 0, ++ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, ++ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, ++ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, ++ HDMI_3D_STRUCTURE_L_DEPTH = 4, ++ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, ++ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, ++ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, ++}; ++ ++struct hdmi_vendor_infoframe { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++ unsigned int oui; ++ u8 vic; ++ enum hdmi_3d_structure s3d_struct; ++ unsigned int s3d_ext_data; ++}; ++ ++union hdmi_vendor_any_infoframe { ++ struct { ++ enum hdmi_infoframe_type type; ++ unsigned char version; ++ unsigned char length; ++ unsigned int oui; ++ } any; ++ struct hdmi_vendor_infoframe hdmi; ++}; ++ ++union hdmi_infoframe { ++ struct hdmi_any_infoframe any; ++ struct hdmi_avi_infoframe avi; ++ struct hdmi_spd_infoframe spd; ++ union hdmi_vendor_any_infoframe vendor; ++ struct hdmi_audio_infoframe audio; ++}; ++ ++struct linux_logo { ++ int type; ++ unsigned int width; ++ unsigned int height; ++ unsigned int clutsize; ++ const unsigned char *clut; ++ const unsigned char *data; ++}; ++ ++enum { ++ FB_BLANK_UNBLANK = 0, ++ FB_BLANK_NORMAL = 1, ++ FB_BLANK_VSYNC_SUSPEND = 2, ++ FB_BLANK_HSYNC_SUSPEND = 3, ++ FB_BLANK_POWERDOWN = 4, ++}; ++ ++struct fb_event { ++ struct fb_info *info; ++ void *data; ++}; ++ ++enum backlight_update_reason { ++ BACKLIGHT_UPDATE_HOTKEY = 0, ++ BACKLIGHT_UPDATE_SYSFS = 1, ++}; ++ ++enum backlight_notification { ++ BACKLIGHT_REGISTERED = 0, ++ BACKLIGHT_UNREGISTERED = 1, ++}; ++ ++struct fb_con2fbmap { ++ __u32 console; ++ __u32 framebuffer; ++}; ++ ++struct fb_cmap_user { ++ __u32 start; ++ __u32 len; ++ __u16 *red; ++ __u16 *green; ++ __u16 *blue; ++ __u16 *transp; ++}; ++ ++struct fb_modelist { ++ struct list_head list; ++ struct fb_videomode mode; ++}; ++ ++struct logo_data { ++ int depth; ++ int needs_directpalette; ++ int needs_truepalette; ++ int needs_cmapreset; ++ const struct linux_logo *logo; ++}; ++ ++struct fb_fix_screeninfo32 { ++ char id[16]; ++ compat_caddr_t smem_start; ++ u32 smem_len; ++ u32 type; ++ u32 type_aux; ++ u32 visual; ++ u16 xpanstep; ++ u16 ypanstep; ++ u16 ywrapstep; ++ u32 line_length; ++ compat_caddr_t mmio_start; ++ u32 mmio_len; ++ u32 accel; ++ u16 reserved[3]; ++}; ++ ++struct fb_cmap32 { ++ u32 start; ++ u32 len; ++ compat_caddr_t red; ++ compat_caddr_t 
green; ++ compat_caddr_t blue; ++ compat_caddr_t transp; ++}; ++ ++enum display_flags { ++ DISPLAY_FLAGS_HSYNC_LOW = 1, ++ DISPLAY_FLAGS_HSYNC_HIGH = 2, ++ DISPLAY_FLAGS_VSYNC_LOW = 4, ++ DISPLAY_FLAGS_VSYNC_HIGH = 8, ++ DISPLAY_FLAGS_DE_LOW = 16, ++ DISPLAY_FLAGS_DE_HIGH = 32, ++ DISPLAY_FLAGS_PIXDATA_POSEDGE = 64, ++ DISPLAY_FLAGS_PIXDATA_NEGEDGE = 128, ++ DISPLAY_FLAGS_INTERLACED = 256, ++ DISPLAY_FLAGS_DOUBLESCAN = 512, ++ DISPLAY_FLAGS_DOUBLECLK = 1024, ++ DISPLAY_FLAGS_SYNC_POSEDGE = 2048, ++ DISPLAY_FLAGS_SYNC_NEGEDGE = 4096, ++}; ++ ++struct videomode { ++ long unsigned int pixelclock; ++ u32 hactive; ++ u32 hfront_porch; ++ u32 hback_porch; ++ u32 hsync_len; ++ u32 vactive; ++ u32 vfront_porch; ++ u32 vback_porch; ++ u32 vsync_len; ++ enum display_flags flags; ++}; ++ ++struct broken_edid { ++ u8 manufacturer[4]; ++ u32 model; ++ u32 fix; ++}; ++ ++struct __fb_timings { ++ u32 dclk; ++ u32 hfreq; ++ u32 vfreq; ++ u32 hactive; ++ u32 vactive; ++ u32 hblank; ++ u32 vblank; ++ u32 htotal; ++ u32 vtotal; ++}; ++ ++typedef unsigned int u_int; ++ ++struct fb_cvt_data { ++ u32 xres; ++ u32 yres; ++ u32 refresh; ++ u32 f_refresh; ++ u32 pixclock; ++ u32 hperiod; ++ u32 hblank; ++ u32 hfreq; ++ u32 htotal; ++ u32 vtotal; ++ u32 vsync; ++ u32 hsync; ++ u32 h_front_porch; ++ u32 h_back_porch; ++ u32 v_front_porch; ++ u32 v_back_porch; ++ u32 h_margin; ++ u32 v_margin; ++ u32 interlace; ++ u32 aspect_ratio; ++ u32 active_pixels; ++ u32 flags; ++ u32 status; ++}; ++ ++struct display { ++ const u_char *fontdata; ++ int userfont; ++ u_short scrollmode; ++ u_short inverse; ++ short int yscroll; ++ int vrows; ++ int cursor_shape; ++ int con_rotate; ++ u32 xres_virtual; ++ u32 yres_virtual; ++ u32 height; ++ u32 width; ++ u32 bits_per_pixel; ++ u32 grayscale; ++ u32 nonstd; ++ u32 accel_flags; ++ u32 rotate; ++ struct fb_bitfield red; ++ struct fb_bitfield green; ++ struct fb_bitfield blue; ++ struct fb_bitfield transp; ++ const struct fb_videomode *mode; ++}; ++ ++struct fbcon_ops { ++ void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); ++ void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); ++ void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); ++ void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); ++ void (*cursor)(struct vc_data *, struct fb_info *, int, int, int); ++ int (*update_start)(struct fb_info *); ++ int (*rotate_font)(struct fb_info *, struct vc_data *); ++ struct fb_var_screeninfo var; ++ struct timer_list cursor_timer; ++ struct fb_cursor cursor_state; ++ struct display *p; ++ struct fb_info *info; ++ int currcon; ++ int cur_blink_jiffies; ++ int cursor_flash; ++ int cursor_reset; ++ int blank_state; ++ int graphics; ++ int save_graphics; ++ int flags; ++ int rotate; ++ int cur_rotate; ++ char *cursor_data; ++ u8 *fontbuffer; ++ u8 *fontdata; ++ u8 *cursor_src; ++ u32 cursor_size; ++ u32 fd_size; ++}; ++ ++enum { ++ FBCON_LOGO_CANSHOW = 4294967295, ++ FBCON_LOGO_DRAW = 4294967294, ++ FBCON_LOGO_DONTSHOW = 4294967293, ++}; ++ ++enum { ++ CLCD_CAP_RGB444 = 1, ++ CLCD_CAP_RGB5551 = 2, ++ CLCD_CAP_RGB565 = 4, ++ CLCD_CAP_RGB888 = 8, ++ CLCD_CAP_BGR444 = 16, ++ CLCD_CAP_BGR5551 = 32, ++ CLCD_CAP_BGR565 = 64, ++ CLCD_CAP_BGR888 = 128, ++ CLCD_CAP_444 = 17, ++ CLCD_CAP_5551 = 34, ++ CLCD_CAP_565 = 68, ++ CLCD_CAP_888 = 136, ++ CLCD_CAP_RGB = 15, ++ CLCD_CAP_BGR = 240, ++ CLCD_CAP_ALL = 255, ++}; ++ ++struct clcd_panel { ++ struct fb_videomode mode; ++ short int width; ++ 
short int height; ++ u32 tim2; ++ u32 tim3; ++ u32 cntl; ++ u32 caps; ++ unsigned int bpp: 8; ++ unsigned int fixedtimings: 1; ++ unsigned int grayscale: 1; ++ unsigned int connector; ++ struct backlight_device *backlight; ++ bool bgr_connection; ++}; ++ ++struct clcd_regs { ++ u32 tim0; ++ u32 tim1; ++ u32 tim2; ++ u32 tim3; ++ u32 cntl; ++ long unsigned int pixclock; ++}; ++ ++struct clcd_fb; ++ ++struct clcd_board { ++ const char *name; ++ u32 caps; ++ int (*check)(struct clcd_fb *, struct fb_var_screeninfo *); ++ void (*decode)(struct clcd_fb *, struct clcd_regs *); ++ void (*disable)(struct clcd_fb *); ++ void (*enable)(struct clcd_fb *); ++ int (*setup)(struct clcd_fb *); ++ int (*mmap)(struct clcd_fb *, struct vm_area_struct *); ++ void (*remove)(struct clcd_fb *); ++}; ++ ++struct clcd_vendor_data; ++ ++struct clcd_fb { ++ struct fb_info fb; ++ struct amba_device *dev; ++ struct clk *clk; ++ struct clcd_vendor_data *vendor; ++ struct clcd_panel *panel; ++ struct clcd_board *board; ++ void *board_data; ++ void *regs; ++ u16 off_ienb; ++ u16 off_cntl; ++ u32 clcd_cntl; ++ u32 cmap[16]; ++ bool clk_enabled; ++}; ++ ++struct clcd_vendor_data { ++ bool clock_timregs; ++ bool packed_24_bit_pixels; ++ bool st_bitmux_control; ++ int (*init_board)(struct amba_device *, struct clcd_board *); ++ int (*init_panel)(struct clcd_fb *, struct device_node *); ++}; ++ ++struct timing_entry { ++ u32 min; ++ u32 typ; ++ u32 max; ++}; ++ ++struct display_timing { ++ struct timing_entry pixelclock; ++ struct timing_entry hactive; ++ struct timing_entry hfront_porch; ++ struct timing_entry hback_porch; ++ struct timing_entry hsync_len; ++ struct timing_entry vactive; ++ struct timing_entry vfront_porch; ++ struct timing_entry vback_porch; ++ struct timing_entry vsync_len; ++ enum display_flags flags; ++}; ++ ++enum drm_panel_orientation { ++ DRM_MODE_PANEL_ORIENTATION_UNKNOWN = 4294967295, ++ DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, ++ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, ++ DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, ++ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, ++}; ++ ++struct simplefb_format { ++ const char *name; ++ u32 bits_per_pixel; ++ struct fb_bitfield red; ++ struct fb_bitfield green; ++ struct fb_bitfield blue; ++ struct fb_bitfield transp; ++ u32 fourcc; ++}; ++ ++struct simplefb_platform_data { ++ u32 width; ++ u32 height; ++ u32 stride; ++ const char *format; ++}; ++ ++struct simplefb_params { ++ u32 width; ++ u32 height; ++ u32 stride; ++ struct simplefb_format *format; ++}; ++ ++struct simplefb_par { ++ u32 palette[16]; ++ bool clks_enabled; ++ unsigned int clk_count; ++ struct clk **clks; ++}; ++ ++struct display_timings { ++ unsigned int num_timings; ++ unsigned int native_mode; ++ struct display_timing **timings; ++}; ++ ++enum dev_prop_type { ++ DEV_PROP_U8 = 0, ++ DEV_PROP_U16 = 1, ++ DEV_PROP_U32 = 2, ++ DEV_PROP_U64 = 3, ++ DEV_PROP_STRING = 4, ++ DEV_PROP_MAX = 5, ++}; ++ ++struct property_entry { ++ const char *name; ++ size_t length; ++ bool is_array; ++ enum dev_prop_type type; ++ union { ++ union { ++ const u8 *u8_data; ++ const u16 *u16_data; ++ const u32 *u32_data; ++ const u64 *u64_data; ++ const char * const *str; ++ } pointer; ++ union { ++ u8 u8_data; ++ u16 u16_data; ++ u32 u32_data; ++ u64 u64_data; ++ const char *str; ++ } value; ++ }; ++}; ++ ++enum ipmi_addr_src { ++ SI_INVALID = 0, ++ SI_HOTMOD = 1, ++ SI_HARDCODED = 2, ++ SI_SPMI = 3, ++ SI_ACPI = 4, ++ SI_SMBIOS = 5, ++ SI_PCI = 6, ++ SI_DEVICETREE = 7, ++ SI_PLATFORM = 8, ++ SI_LAST = 9, ++}; ++ ++union 
ipmi_smi_info_union { ++ struct { ++ acpi_handle acpi_handle; ++ } acpi_info; ++}; ++ ++struct dmi_header { ++ u8 type; ++ u8 length; ++ u16 handle; ++}; ++ ++enum si_type { ++ SI_TYPE_INVALID = 0, ++ SI_KCS = 1, ++ SI_SMIC = 2, ++ SI_BT = 3, ++}; ++ ++struct si_sm_io { ++ unsigned char (*inputb)(const struct si_sm_io *, unsigned int); ++ void (*outputb)(const struct si_sm_io *, unsigned int, unsigned char); ++ void *addr; ++ int regspacing; ++ int regsize; ++ int regshift; ++ int addr_type; ++ long int addr_data; ++ enum ipmi_addr_src addr_source; ++ void (*addr_source_cleanup)(struct si_sm_io *); ++ void *addr_source_data; ++ union ipmi_smi_info_union addr_info; ++ int (*io_setup)(struct si_sm_io *); ++ void (*io_cleanup)(struct si_sm_io *); ++ unsigned int io_size; ++ int irq; ++ int (*irq_setup)(struct si_sm_io *); ++ void *irq_handler_data; ++ void (*irq_cleanup)(struct si_sm_io *); ++ u8 slave_addr; ++ enum si_type si_type; ++ struct device *dev; ++}; ++ ++enum si_sm_result { ++ SI_SM_CALL_WITHOUT_DELAY = 0, ++ SI_SM_CALL_WITH_DELAY = 1, ++ SI_SM_CALL_WITH_TICK_DELAY = 2, ++ SI_SM_TRANSACTION_COMPLETE = 3, ++ SI_SM_IDLE = 4, ++ SI_SM_HOSED = 5, ++ SI_SM_ATTN = 6, ++}; ++ ++struct si_sm_data; ++ ++struct si_sm_handlers { ++ char *version; ++ unsigned int (*init_data)(struct si_sm_data *, struct si_sm_io *); ++ int (*start_transaction)(struct si_sm_data *, unsigned char *, unsigned int); ++ int (*get_result)(struct si_sm_data *, unsigned char *, unsigned int); ++ enum si_sm_result (*event)(struct si_sm_data *, long int); ++ int (*detect)(struct si_sm_data *); ++ void (*cleanup)(struct si_sm_data *); ++ int (*size)(); ++}; ++ ++struct ipmi_dmi_info { ++ enum si_type si_type; ++ u32 flags; ++ long unsigned int addr; ++ u8 slave_addr; ++ struct ipmi_dmi_info *next; ++}; ++ ++typedef u8 acpi_owner_id; ++ ++union acpi_name_union { ++ u32 integer; ++ char ascii[4]; ++}; ++ ++struct acpi_table_desc { ++ acpi_physical_address address; ++ struct acpi_table_header *pointer; ++ u32 length; ++ union acpi_name_union signature; ++ acpi_owner_id owner_id; ++ u8 flags; ++ u16 validation_count; ++}; ++ ++struct acpi_madt_local_apic { ++ struct acpi_subtable_header header; ++ u8 processor_id; ++ u8 id; ++ u32 lapic_flags; ++}; ++ ++struct acpi_madt_io_apic { ++ struct acpi_subtable_header header; ++ u8 id; ++ u8 reserved; ++ u32 address; ++ u32 global_irq_base; ++}; ++ ++struct acpi_madt_interrupt_override { ++ struct acpi_subtable_header header; ++ u8 bus; ++ u8 source_irq; ++ u32 global_irq; ++ u16 inti_flags; ++} __attribute__((packed)); ++ ++struct acpi_madt_nmi_source { ++ struct acpi_subtable_header header; ++ u16 inti_flags; ++ u32 global_irq; ++}; ++ ++struct acpi_madt_local_apic_nmi { ++ struct acpi_subtable_header header; ++ u8 processor_id; ++ u16 inti_flags; ++ u8 lint; ++} __attribute__((packed)); ++ ++struct acpi_madt_local_apic_override { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u64 address; ++} __attribute__((packed)); ++ ++struct acpi_madt_io_sapic { ++ struct acpi_subtable_header header; ++ u8 id; ++ u8 reserved; ++ u32 global_irq_base; ++ u64 address; ++}; ++ ++struct acpi_madt_local_sapic { ++ struct acpi_subtable_header header; ++ u8 processor_id; ++ u8 id; ++ u8 eid; ++ u8 reserved[3]; ++ u32 lapic_flags; ++ u32 uid; ++ char uid_string[1]; ++} __attribute__((packed)); ++ ++struct acpi_madt_interrupt_source { ++ struct acpi_subtable_header header; ++ u16 inti_flags; ++ u8 type; ++ u8 id; ++ u8 eid; ++ u8 io_sapic_vector; ++ u32 global_irq; ++ u32 flags; ++}; ++ 
++struct acpi_madt_local_x2apic { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 local_apic_id; ++ u32 lapic_flags; ++ u32 uid; ++}; ++ ++struct acpi_madt_local_x2apic_nmi { ++ struct acpi_subtable_header header; ++ u16 inti_flags; ++ u32 uid; ++ u8 lint; ++ u8 reserved[3]; ++}; ++ ++struct acpi_subtable_proc { ++ int id; ++ acpi_tbl_entry_handler handler; ++ int count; ++}; ++ ++struct cpio_data { ++ void *data; ++ size_t size; ++ char name[18]; ++}; ++ ++struct transaction; ++ ++struct acpi_ec { ++ acpi_handle handle; ++ u32 gpe; ++ long unsigned int command_addr; ++ long unsigned int data_addr; ++ bool global_lock; ++ long unsigned int flags; ++ long unsigned int reference_count; ++ struct mutex mutex; ++ wait_queue_head_t wait; ++ struct list_head list; ++ struct transaction *curr; ++ spinlock_t lock; ++ struct work_struct work; ++ long unsigned int timestamp; ++ long unsigned int nr_pending_queries; ++ bool busy_polling; ++ unsigned int polling_guard; ++}; ++ ++typedef char *acpi_string; ++ ++struct acpi_osi_entry { ++ char string[64]; ++ bool enable; ++}; ++ ++struct acpi_osi_config { ++ u8 default_disabling; ++ unsigned int linux_enable: 1; ++ unsigned int linux_dmi: 1; ++ unsigned int linux_cmdline: 1; ++ unsigned int darwin_enable: 1; ++ unsigned int darwin_dmi: 1; ++ unsigned int darwin_cmdline: 1; ++}; ++ ++typedef u32 acpi_name; ++ ++struct acpi_predefined_names { ++ const char *name; ++ u8 type; ++ char *val; ++}; ++ ++typedef u32 (*acpi_osd_handler)(void *); ++ ++typedef void (*acpi_osd_exec_callback)(void *); ++ ++typedef u32 (*acpi_sci_handler)(void *); ++ ++typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *); ++ ++typedef void (*acpi_notify_handler)(acpi_handle, u32, void *); ++ ++typedef void (*acpi_object_handler)(acpi_handle, void *); ++ ++typedef acpi_status (*acpi_init_handler)(acpi_handle, u32); ++ ++typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *); ++ ++typedef acpi_status (*acpi_table_handler)(u32, void *, void *); ++ ++typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *); ++ ++typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **); ++ ++typedef u32 (*acpi_interface_handler)(acpi_string, u32); ++ ++struct acpi_pci_id { ++ u16 segment; ++ u16 bus; ++ u16 device; ++ u16 function; ++}; ++ ++struct acpi_mem_space_context { ++ u32 length; ++ acpi_physical_address address; ++ acpi_physical_address mapped_physical_address; ++ u8 *mapped_logical_address; ++ acpi_size mapped_length; ++}; ++ ++typedef enum { ++ OSL_GLOBAL_LOCK_HANDLER = 0, ++ OSL_NOTIFY_HANDLER = 1, ++ OSL_GPE_HANDLER = 2, ++ OSL_DEBUGGER_MAIN_THREAD = 3, ++ OSL_DEBUGGER_EXEC_THREAD = 4, ++ OSL_EC_POLL_HANDLER = 5, ++ OSL_EC_BURST_HANDLER = 6, ++} acpi_execute_type; ++ ++struct acpi_rw_lock { ++ void *writer_mutex; ++ void *reader_mutex; ++ u32 num_readers; ++}; ++ ++struct acpi_mutex_info { ++ void *mutex; ++ u32 use_count; ++ u64 thread_id; ++}; ++ ++union acpi_operand_object; ++ ++struct acpi_namespace_node { ++ union acpi_operand_object *object; ++ u8 descriptor_type; ++ u8 type; ++ u8 flags; ++ acpi_owner_id owner_id; ++ union acpi_name_union name; ++ struct acpi_namespace_node *parent; ++ struct acpi_namespace_node *child; ++ struct acpi_namespace_node *peer; ++}; ++ ++struct acpi_object_common { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++}; ++ ++struct acpi_object_integer { ++ union 
acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 fill[3]; ++ u64 value; ++}; ++ ++struct acpi_object_string { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ char *pointer; ++ u32 length; ++}; ++ ++struct acpi_object_buffer { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 *pointer; ++ u32 length; ++ u32 aml_length; ++ u8 *aml_start; ++ struct acpi_namespace_node *node; ++}; ++ ++struct acpi_object_package { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ struct acpi_namespace_node *node; ++ union acpi_operand_object **elements; ++ u8 *aml_start; ++ u32 aml_length; ++ u32 count; ++}; ++ ++struct acpi_object_event { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ void *os_semaphore; ++}; ++ ++struct acpi_walk_state; ++ ++typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *); ++ ++struct acpi_object_method { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 info_flags; ++ u8 param_count; ++ u8 sync_level; ++ union acpi_operand_object *mutex; ++ union acpi_operand_object *node; ++ u8 *aml_start; ++ union { ++ acpi_internal_method implementation; ++ union acpi_operand_object *handler; ++ } dispatch; ++ u32 aml_length; ++ u8 thread_count; ++ acpi_owner_id owner_id; ++}; ++ ++struct acpi_thread_state; ++ ++struct acpi_object_mutex { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 sync_level; ++ u16 acquisition_depth; ++ void *os_mutex; ++ u64 thread_id; ++ struct acpi_thread_state *owner_thread; ++ union acpi_operand_object *prev; ++ union acpi_operand_object *next; ++ struct acpi_namespace_node *node; ++ u8 original_sync_level; ++}; ++ ++struct acpi_object_region { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 space_id; ++ struct acpi_namespace_node *node; ++ union acpi_operand_object *handler; ++ union acpi_operand_object *next; ++ acpi_physical_address address; ++ u32 length; ++}; ++ ++struct acpi_object_notify_common { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ union acpi_operand_object *notify_list[2]; ++ union acpi_operand_object *handler; ++}; ++ ++struct acpi_gpe_block_info; ++ ++struct acpi_object_device { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ union acpi_operand_object *notify_list[2]; ++ union acpi_operand_object *handler; ++ struct acpi_gpe_block_info *gpe_block; ++}; ++ ++struct acpi_object_power_resource { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ union acpi_operand_object *notify_list[2]; ++ union acpi_operand_object *handler; ++ u32 system_level; ++ u32 resource_order; ++}; ++ ++struct acpi_object_processor { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 proc_id; ++ u8 length; ++ union acpi_operand_object *notify_list[2]; ++ union acpi_operand_object *handler; ++ acpi_io_address address; ++}; ++ ++struct 
acpi_object_thermal_zone { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ union acpi_operand_object *notify_list[2]; ++ union acpi_operand_object *handler; ++}; ++ ++struct acpi_object_field_common { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 field_flags; ++ u8 attribute; ++ u8 access_byte_width; ++ struct acpi_namespace_node *node; ++ u32 bit_length; ++ u32 base_byte_offset; ++ u32 value; ++ u8 start_field_bit_offset; ++ u8 access_length; ++ union acpi_operand_object *region_obj; ++}; ++ ++struct acpi_object_region_field { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 field_flags; ++ u8 attribute; ++ u8 access_byte_width; ++ struct acpi_namespace_node *node; ++ u32 bit_length; ++ u32 base_byte_offset; ++ u32 value; ++ u8 start_field_bit_offset; ++ u8 access_length; ++ u16 resource_length; ++ union acpi_operand_object *region_obj; ++ u8 *resource_buffer; ++ u16 pin_number_index; ++}; ++ ++struct acpi_object_buffer_field { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 field_flags; ++ u8 attribute; ++ u8 access_byte_width; ++ struct acpi_namespace_node *node; ++ u32 bit_length; ++ u32 base_byte_offset; ++ u32 value; ++ u8 start_field_bit_offset; ++ u8 access_length; ++ union acpi_operand_object *buffer_obj; ++}; ++ ++struct acpi_object_bank_field { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 field_flags; ++ u8 attribute; ++ u8 access_byte_width; ++ struct acpi_namespace_node *node; ++ u32 bit_length; ++ u32 base_byte_offset; ++ u32 value; ++ u8 start_field_bit_offset; ++ u8 access_length; ++ union acpi_operand_object *region_obj; ++ union acpi_operand_object *bank_obj; ++}; ++ ++struct acpi_object_index_field { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 field_flags; ++ u8 attribute; ++ u8 access_byte_width; ++ struct acpi_namespace_node *node; ++ u32 bit_length; ++ u32 base_byte_offset; ++ u32 value; ++ u8 start_field_bit_offset; ++ u8 access_length; ++ union acpi_operand_object *index_obj; ++ union acpi_operand_object *data_obj; ++}; ++ ++struct acpi_object_notify_handler { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ struct acpi_namespace_node *node; ++ u32 handler_type; ++ acpi_notify_handler handler; ++ void *context; ++ union acpi_operand_object *next[2]; ++}; ++ ++struct acpi_object_addr_handler { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 space_id; ++ u8 handler_flags; ++ acpi_adr_space_handler handler; ++ struct acpi_namespace_node *node; ++ void *context; ++ acpi_adr_space_setup setup; ++ union acpi_operand_object *region_list; ++ union acpi_operand_object *next; ++}; ++ ++struct acpi_object_reference { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ u8 class; ++ u8 target_type; ++ u8 resolved; ++ void *object; ++ struct acpi_namespace_node *node; ++ union acpi_operand_object **where; ++ u8 *index_pointer; ++ u8 *aml; ++ u32 value; ++}; ++ ++struct acpi_object_extra { ++ union acpi_operand_object *next_object; ++ u8 
descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ struct acpi_namespace_node *method_REG; ++ struct acpi_namespace_node *scope_node; ++ void *region_context; ++ u8 *aml_start; ++ u32 aml_length; ++}; ++ ++struct acpi_object_data { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ acpi_object_handler handler; ++ void *pointer; ++}; ++ ++struct acpi_object_cache_list { ++ union acpi_operand_object *next_object; ++ u8 descriptor_type; ++ u8 type; ++ u16 reference_count; ++ u8 flags; ++ union acpi_operand_object *next; ++}; ++ ++union acpi_operand_object { ++ struct acpi_object_common common; ++ struct acpi_object_integer integer; ++ struct acpi_object_string string; ++ struct acpi_object_buffer buffer; ++ struct acpi_object_package package; ++ struct acpi_object_event event; ++ struct acpi_object_method method; ++ struct acpi_object_mutex mutex; ++ struct acpi_object_region region; ++ struct acpi_object_notify_common common_notify; ++ struct acpi_object_device device; ++ struct acpi_object_power_resource power_resource; ++ struct acpi_object_processor processor; ++ struct acpi_object_thermal_zone thermal_zone; ++ struct acpi_object_field_common common_field; ++ struct acpi_object_region_field field; ++ struct acpi_object_buffer_field buffer_field; ++ struct acpi_object_bank_field bank_field; ++ struct acpi_object_index_field index_field; ++ struct acpi_object_notify_handler notify; ++ struct acpi_object_addr_handler address_space; ++ struct acpi_object_reference reference; ++ struct acpi_object_extra extra; ++ struct acpi_object_data data; ++ struct acpi_object_cache_list cache; ++ struct acpi_namespace_node node; ++}; ++ ++struct acpi_table_list { ++ struct acpi_table_desc *tables; ++ u32 current_table_count; ++ u32 max_table_count; ++ u8 flags; ++}; ++ ++union acpi_parse_object; ++ ++union acpi_generic_state; ++ ++struct acpi_parse_state { ++ u8 *aml_start; ++ u8 *aml; ++ u8 *aml_end; ++ u8 *pkg_start; ++ u8 *pkg_end; ++ union acpi_parse_object *start_op; ++ struct acpi_namespace_node *start_node; ++ union acpi_generic_state *scope; ++ union acpi_parse_object *start_scope; ++ u32 aml_size; ++}; ++ ++typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **); ++ ++typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *); ++ ++struct acpi_opcode_info; ++ ++struct acpi_walk_state { ++ struct acpi_walk_state *next; ++ u8 descriptor_type; ++ u8 walk_type; ++ u16 opcode; ++ u8 next_op_info; ++ u8 num_operands; ++ u8 operand_index; ++ acpi_owner_id owner_id; ++ u8 last_predicate; ++ u8 current_result; ++ u8 return_used; ++ u8 scope_depth; ++ u8 pass_number; ++ u8 namespace_override; ++ u8 result_size; ++ u8 result_count; ++ u8 *aml; ++ u32 arg_types; ++ u32 method_breakpoint; ++ u32 user_breakpoint; ++ u32 parse_flags; ++ struct acpi_parse_state parser_state; ++ u32 prev_arg_types; ++ u32 arg_count; ++ struct acpi_namespace_node arguments[7]; ++ struct acpi_namespace_node local_variables[8]; ++ union acpi_operand_object *operands[9]; ++ union acpi_operand_object **params; ++ u8 *aml_last_while; ++ union acpi_operand_object **caller_return_desc; ++ union acpi_generic_state *control_state; ++ struct acpi_namespace_node *deferred_node; ++ union acpi_operand_object *implicit_return_obj; ++ struct acpi_namespace_node *method_call_node; ++ union acpi_parse_object *method_call_op; ++ union acpi_operand_object *method_desc; ++ struct acpi_namespace_node *method_node; ++ 
union acpi_parse_object *op; ++ const struct acpi_opcode_info *op_info; ++ union acpi_parse_object *origin; ++ union acpi_operand_object *result_obj; ++ union acpi_generic_state *results; ++ union acpi_operand_object *return_desc; ++ union acpi_generic_state *scope_info; ++ union acpi_parse_object *prev_op; ++ union acpi_parse_object *next_op; ++ struct acpi_thread_state *thread; ++ acpi_parse_downwards descending_callback; ++ acpi_parse_upwards ascending_callback; ++}; ++ ++struct acpi_sci_handler_info { ++ struct acpi_sci_handler_info *next; ++ acpi_sci_handler address; ++ void *context; ++}; ++ ++struct acpi_gpe_handler_info { ++ acpi_gpe_handler address; ++ void *context; ++ struct acpi_namespace_node *method_node; ++ u8 original_flags; ++ u8 originally_enabled; ++}; ++ ++struct acpi_gpe_notify_info { ++ struct acpi_namespace_node *device_node; ++ struct acpi_gpe_notify_info *next; ++}; ++ ++union acpi_gpe_dispatch_info { ++ struct acpi_namespace_node *method_node; ++ struct acpi_gpe_handler_info *handler; ++ struct acpi_gpe_notify_info *notify_list; ++}; ++ ++struct acpi_gpe_register_info; ++ ++struct acpi_gpe_event_info { ++ union acpi_gpe_dispatch_info dispatch; ++ struct acpi_gpe_register_info *register_info; ++ u8 flags; ++ u8 gpe_number; ++ u8 runtime_count; ++ u8 disable_for_dispatch; ++}; ++ ++struct acpi_gpe_register_info { ++ struct acpi_generic_address status_address; ++ struct acpi_generic_address enable_address; ++ u16 base_gpe_number; ++ u8 enable_for_wake; ++ u8 enable_for_run; ++ u8 mask_for_run; ++ u8 enable_mask; ++} __attribute__((packed)); ++ ++struct acpi_gpe_xrupt_info; ++ ++struct acpi_gpe_block_info { ++ struct acpi_namespace_node *node; ++ struct acpi_gpe_block_info *previous; ++ struct acpi_gpe_block_info *next; ++ struct acpi_gpe_xrupt_info *xrupt_block; ++ struct acpi_gpe_register_info *register_info; ++ struct acpi_gpe_event_info *event_info; ++ u64 address; ++ u32 register_count; ++ u16 gpe_count; ++ u16 block_base_number; ++ u8 space_id; ++ u8 initialized; ++}; ++ ++struct acpi_gpe_xrupt_info { ++ struct acpi_gpe_xrupt_info *previous; ++ struct acpi_gpe_xrupt_info *next; ++ struct acpi_gpe_block_info *gpe_block_list_head; ++ u32 interrupt_number; ++}; ++ ++struct acpi_common_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++}; ++ ++struct acpi_update_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ union acpi_operand_object *object; ++}; ++ ++struct acpi_pkg_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ u32 index; ++ union acpi_operand_object *source_object; ++ union acpi_operand_object *dest_object; ++ struct acpi_walk_state *walk_state; ++ void *this_target_obj; ++ u32 num_packages; ++}; ++ ++struct acpi_control_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ u16 opcode; ++ union acpi_parse_object *predicate_op; ++ u8 *aml_predicate_start; ++ u8 *package_end; ++ u64 loop_timeout; ++}; ++ ++union acpi_parse_value { ++ u64 integer; ++ u32 size; ++ char *string; ++ u8 *buffer; ++ char *name; ++ union acpi_parse_object *arg; ++}; ++ ++struct acpi_parse_obj_common { ++ union acpi_parse_object *parent; ++ u8 descriptor_type; ++ u8 flags; ++ u16 aml_opcode; ++ u8 *aml; ++ union acpi_parse_object *next; ++ struct acpi_namespace_node *node; ++ union acpi_parse_value value; ++ u8 arg_list_length; ++}; ++ ++struct acpi_parse_obj_named { ++ union acpi_parse_object *parent; ++ u8 descriptor_type; ++ u8 
flags; ++ u16 aml_opcode; ++ u8 *aml; ++ union acpi_parse_object *next; ++ struct acpi_namespace_node *node; ++ union acpi_parse_value value; ++ u8 arg_list_length; ++ char *path; ++ u8 *data; ++ u32 length; ++ u32 name; ++}; ++ ++struct acpi_parse_obj_asl { ++ union acpi_parse_object *parent; ++ u8 descriptor_type; ++ u8 flags; ++ u16 aml_opcode; ++ u8 *aml; ++ union acpi_parse_object *next; ++ struct acpi_namespace_node *node; ++ union acpi_parse_value value; ++ u8 arg_list_length; ++ union acpi_parse_object *child; ++ union acpi_parse_object *parent_method; ++ char *filename; ++ u8 file_changed; ++ char *parent_filename; ++ char *external_name; ++ char *namepath; ++ char name_seg[4]; ++ u32 extra_value; ++ u32 column; ++ u32 line_number; ++ u32 logical_line_number; ++ u32 logical_byte_offset; ++ u32 end_line; ++ u32 end_logical_line; ++ u32 acpi_btype; ++ u32 aml_length; ++ u32 aml_subtree_length; ++ u32 final_aml_length; ++ u32 final_aml_offset; ++ u32 compile_flags; ++ u16 parse_opcode; ++ u8 aml_opcode_length; ++ u8 aml_pkg_len_bytes; ++ u8 extra; ++ char parse_op_name[20]; ++}; ++ ++union acpi_parse_object { ++ struct acpi_parse_obj_common common; ++ struct acpi_parse_obj_named named; ++ struct acpi_parse_obj_asl asl; ++}; ++ ++struct acpi_scope_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ struct acpi_namespace_node *node; ++}; ++ ++struct acpi_pscope_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ u32 arg_count; ++ union acpi_parse_object *op; ++ u8 *arg_end; ++ u8 *pkg_end; ++ u32 arg_list; ++}; ++ ++struct acpi_thread_state { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ u8 current_sync_level; ++ struct acpi_walk_state *walk_state_list; ++ union acpi_operand_object *acquired_mutex_list; ++ u64 thread_id; ++}; ++ ++struct acpi_result_values { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ union acpi_operand_object *obj_desc[8]; ++}; ++ ++struct acpi_global_notify_handler { ++ acpi_notify_handler handler; ++ void *context; ++}; ++ ++struct acpi_notify_info { ++ void *next; ++ u8 descriptor_type; ++ u8 flags; ++ u16 value; ++ u16 state; ++ u8 handler_list_id; ++ struct acpi_namespace_node *node; ++ union acpi_operand_object *handler_list_head; ++ struct acpi_global_notify_handler *global; ++}; ++ ++union acpi_generic_state { ++ struct acpi_common_state common; ++ struct acpi_control_state control; ++ struct acpi_update_state update; ++ struct acpi_scope_state scope; ++ struct acpi_pscope_state parse_scope; ++ struct acpi_pkg_state pkg; ++ struct acpi_thread_state thread; ++ struct acpi_result_values results; ++ struct acpi_notify_info notify; ++}; ++ ++struct acpi_address_range { ++ struct acpi_address_range *next; ++ struct acpi_namespace_node *region_node; ++ acpi_physical_address start_address; ++ acpi_physical_address end_address; ++}; ++ ++struct acpi_opcode_info { ++ u32 parse_args; ++ u32 runtime_args; ++ u16 flags; ++ u8 object_type; ++ u8 class; ++ u8 type; ++}; ++ ++struct acpi_comment_node { ++ char *comment; ++ struct acpi_comment_node *next; ++}; ++ ++struct acpi_bit_register_info { ++ u8 parent_register; ++ u8 bit_position; ++ u16 access_bit_mask; ++}; ++ ++struct acpi_interface_info { ++ char *name; ++ struct acpi_interface_info *next; ++ u8 flags; ++ u8 value; ++}; ++ ++struct acpi_os_dpc { ++ acpi_osd_exec_callback function; ++ void *context; ++ struct work_struct work; ++}; ++ ++struct acpi_ioremap { ++ struct 
list_head list; ++ void *virt; ++ acpi_physical_address phys; ++ acpi_size size; ++ long unsigned int refcount; ++}; ++ ++struct acpi_hp_work { ++ struct work_struct work; ++ struct acpi_device *adev; ++ u32 src; ++}; ++ ++struct acpi_pld_info { ++ u8 revision; ++ u8 ignore_color; ++ u8 red; ++ u8 green; ++ u8 blue; ++ u16 width; ++ u16 height; ++ u8 user_visible; ++ u8 dock; ++ u8 lid; ++ u8 panel; ++ u8 vertical_position; ++ u8 horizontal_position; ++ u8 shape; ++ u8 group_orientation; ++ u8 group_token; ++ u8 group_position; ++ u8 bay; ++ u8 ejectable; ++ u8 ospm_eject_required; ++ u8 cabinet_number; ++ u8 card_cage_number; ++ u8 reference; ++ u8 rotation; ++ u8 order; ++ u8 reserved; ++ u16 vertical_offset; ++ u16 horizontal_offset; ++}; ++ ++struct acpi_handle_list { ++ u32 count; ++ acpi_handle handles[10]; ++}; ++ ++enum acpi_predicate { ++ all_versions = 0, ++ less_than_or_equal = 1, ++ equal = 2, ++ greater_than_or_equal = 3, ++}; ++ ++struct acpi_platform_list { ++ char oem_id[7]; ++ char oem_table_id[9]; ++ u32 oem_revision; ++ char *table; ++ enum acpi_predicate pred; ++ char *reason; ++ u32 data; ++}; ++ ++struct acpi_device_bus_id { ++ char bus_id[15]; ++ unsigned int instance_no; ++ struct list_head node; ++}; ++ ++struct acpi_dev_match_info { ++ const char *dev_name; ++ struct acpi_device_id hid[2]; ++ const char *uid; ++ s64 hrv; ++}; ++ ++struct nvs_region { ++ __u64 phys_start; ++ __u64 size; ++ struct list_head node; ++}; ++ ++struct acpi_hardware_id { ++ struct list_head list; ++ const char *id; ++}; ++ ++struct acpi_data_node { ++ const char *name; ++ acpi_handle handle; ++ struct fwnode_handle fwnode; ++ struct fwnode_handle *parent; ++ struct acpi_device_data data; ++ struct list_head sibling; ++ struct kobject kobj; ++ struct completion kobj_done; ++}; ++ ++struct acpi_data_node_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct acpi_data_node *, char *); ++ ssize_t (*store)(struct acpi_data_node *, const char *, size_t); ++}; ++ ++struct pm_domain_data { ++ struct list_head list_node; ++ struct device *dev; ++}; ++ ++struct dev_power_governor { ++ bool (*power_down_ok)(struct dev_pm_domain *); ++ bool (*suspend_ok)(struct device *); ++}; ++ ++typedef u32 (*acpi_event_handler)(void *); ++ ++enum acpi_bus_device_type { ++ ACPI_BUS_TYPE_DEVICE = 0, ++ ACPI_BUS_TYPE_POWER = 1, ++ ACPI_BUS_TYPE_PROCESSOR = 2, ++ ACPI_BUS_TYPE_THERMAL = 3, ++ ACPI_BUS_TYPE_POWER_BUTTON = 4, ++ ACPI_BUS_TYPE_SLEEP_BUTTON = 5, ++ ACPI_BUS_TYPE_ECDT_EC = 6, ++ ACPI_BUS_DEVICE_TYPE_COUNT = 7, ++}; ++ ++struct acpi_device_physical_node { ++ unsigned int node_id; ++ struct list_head node; ++ struct device *dev; ++ bool put_online: 1; ++}; ++ ++struct acpi_osc_context { ++ char *uuid_str; ++ int rev; ++ struct acpi_buffer cap; ++ struct acpi_buffer ret; ++}; ++ ++struct acpi_pnp_device_id { ++ u32 length; ++ char *string; ++}; ++ ++struct acpi_pnp_device_id_list { ++ u32 count; ++ u32 list_size; ++ struct acpi_pnp_device_id ids[1]; ++}; ++ ++struct acpi_device_info { ++ u32 info_size; ++ u32 name; ++ acpi_object_type type; ++ u8 param_count; ++ u16 valid; ++ u8 flags; ++ u8 highest_dstates[4]; ++ u8 lowest_dstates[5]; ++ u64 address; ++ struct acpi_pnp_device_id hardware_id; ++ struct acpi_pnp_device_id unique_id; ++ struct acpi_pnp_device_id class_code; ++ struct acpi_pnp_device_id_list compatible_id_list; ++}; ++ ++struct acpi_table_spcr { ++ struct acpi_table_header header; ++ u8 interface_type; ++ u8 reserved[3]; ++ struct acpi_generic_address serial_port; ++ u8 interrupt_type; ++ 
u8 pc_interrupt; ++ u32 interrupt; ++ u8 baud_rate; ++ u8 parity; ++ u8 stop_bits; ++ u8 flow_control; ++ u8 terminal_type; ++ u8 reserved1; ++ u16 pci_device_id; ++ u16 pci_vendor_id; ++ u8 pci_bus; ++ u8 pci_device; ++ u8 pci_function; ++ u32 pci_flags; ++ u8 pci_segment; ++ u32 reserved2; ++} __attribute__((packed)); ++ ++struct acpi_table_stao { ++ struct acpi_table_header header; ++ u8 ignore_uart; ++} __attribute__((packed)); ++ ++enum acpi_reconfig_event { ++ ACPI_RECONFIG_DEVICE_ADD = 0, ++ ACPI_RECONFIG_DEVICE_REMOVE = 1, ++}; ++ ++struct acpi_dep_data { ++ struct list_head node; ++ acpi_handle master; ++ acpi_handle slave; ++}; ++ ++struct acpi_table_events_work { ++ struct work_struct work; ++ void *table; ++ u32 event; ++}; ++ ++struct resource_win { ++ struct resource res; ++ resource_size_t offset; ++}; ++ ++struct res_proc_context { ++ struct list_head *list; ++ int (*preproc)(struct acpi_resource *, void *); ++ void *preproc_data; ++ int count; ++ int error; ++}; ++ ++struct acpi_table_madt { ++ struct acpi_table_header header; ++ u32 address; ++ u32 flags; ++}; ++ ++typedef u32 acpi_event_status; ++ ++struct acpi_table_ecdt { ++ struct acpi_table_header header; ++ struct acpi_generic_address control; ++ struct acpi_generic_address data; ++ u32 uid; ++ u8 gpe; ++ u8 id[1]; ++} __attribute__((packed)); ++ ++struct transaction { ++ const u8 *wdata; ++ u8 *rdata; ++ short unsigned int irq_count; ++ u8 command; ++ u8 wi; ++ u8 ri; ++ u8 wlen; ++ u8 rlen; ++ u8 flags; ++}; ++ ++typedef int (*acpi_ec_query_func)(void *); ++ ++enum ec_command { ++ ACPI_EC_COMMAND_READ = 128, ++ ACPI_EC_COMMAND_WRITE = 129, ++ ACPI_EC_BURST_ENABLE = 130, ++ ACPI_EC_BURST_DISABLE = 131, ++ ACPI_EC_COMMAND_QUERY = 132, ++}; ++ ++enum { ++ EC_FLAGS_QUERY_ENABLED = 0, ++ EC_FLAGS_QUERY_PENDING = 1, ++ EC_FLAGS_QUERY_GUARDING = 2, ++ EC_FLAGS_GPE_HANDLER_INSTALLED = 3, ++ EC_FLAGS_EC_HANDLER_INSTALLED = 4, ++ EC_FLAGS_EVT_HANDLER_INSTALLED = 5, ++ EC_FLAGS_STARTED = 6, ++ EC_FLAGS_STOPPED = 7, ++ EC_FLAGS_GPE_MASKED = 8, ++}; ++ ++struct acpi_ec_query_handler { ++ struct list_head node; ++ acpi_ec_query_func func; ++ acpi_handle handle; ++ void *data; ++ u8 query_bit; ++ struct kref kref; ++}; ++ ++struct acpi_ec_query { ++ struct transaction transaction; ++ struct work_struct work; ++ struct acpi_ec_query_handler *handler; ++}; ++ ++struct pci_osc_bit_struct { ++ u32 bit; ++ char *desc; ++}; ++ ++struct acpi_handle_node { ++ struct list_head node; ++ acpi_handle handle; ++}; ++ ++struct acpi_pci_link_irq { ++ u32 active; ++ u8 triggering; ++ u8 polarity; ++ u8 resource_type; ++ u8 possible_count; ++ u32 possible[16]; ++ u8 initialized: 1; ++ u8 reserved: 7; ++}; ++ ++struct acpi_pci_link { ++ struct list_head list; ++ struct acpi_device *device; ++ struct acpi_pci_link_irq irq; ++ int refcnt; ++}; ++ ++struct acpi_pci_routing_table { ++ u32 length; ++ u32 pin; ++ u64 address; ++ u32 source_index; ++ char source[4]; ++}; ++ ++struct acpi_prt_entry { ++ struct acpi_pci_id id; ++ u8 pin; ++ acpi_handle link; ++ u32 index; ++}; ++ ++struct prt_quirk { ++ const struct dmi_system_id *system; ++ unsigned int segment; ++ unsigned int bus; ++ unsigned int device; ++ unsigned char pin; ++ const char *source; ++ const char *actual_source; ++}; ++ ++struct apd_private_data; ++ ++struct apd_device_desc { ++ unsigned int flags; ++ unsigned int fixed_clk_rate; ++ struct property_entry *properties; ++ int (*setup)(struct apd_private_data *); ++}; ++ ++struct apd_private_data { ++ struct clk *clk; ++ struct acpi_device 
*adev; ++ const struct apd_device_desc *dev_desc; ++}; ++ ++struct acpi_power_resource { ++ struct acpi_device device; ++ struct list_head list_node; ++ char *name; ++ u32 system_level; ++ u32 order; ++ unsigned int ref_count; ++ bool wakeup_enabled; ++ struct mutex resource_lock; ++}; ++ ++struct acpi_power_resource_entry { ++ struct list_head node; ++ struct acpi_power_resource *resource; ++}; ++ ++struct acpi_bus_event { ++ struct list_head node; ++ acpi_device_class device_class; ++ acpi_bus_id bus_id; ++ u32 type; ++ u32 data; ++}; ++ ++struct acpi_genl_event { ++ acpi_device_class device_class; ++ char bus_id[15]; ++ u32 type; ++ u32 data; ++}; ++ ++enum { ++ ACPI_GENL_ATTR_UNSPEC = 0, ++ ACPI_GENL_ATTR_EVENT = 1, ++ __ACPI_GENL_ATTR_MAX = 2, ++}; ++ ++enum { ++ ACPI_GENL_CMD_UNSPEC = 0, ++ ACPI_GENL_CMD_EVENT = 1, ++ __ACPI_GENL_CMD_MAX = 2, ++}; ++ ++struct acpi_ged_device { ++ struct device *dev; ++ struct list_head event_list; ++}; ++ ++struct acpi_ged_event { ++ struct list_head node; ++ struct device *dev; ++ unsigned int gsi; ++ unsigned int irq; ++ acpi_handle handle; ++}; ++ ++typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *); ++ ++struct acpi_table_bert { ++ struct acpi_table_header header; ++ u32 region_length; ++ u64 address; ++}; ++ ++struct acpi_table_attr { ++ struct bin_attribute attr; ++ char name[4]; ++ int instance; ++ char filename[8]; ++ struct list_head node; ++}; ++ ++struct acpi_data_attr { ++ struct bin_attribute attr; ++ u64 addr; ++}; ++ ++struct acpi_data_obj { ++ char *name; ++ int (*fn)(void *, struct acpi_data_attr *); ++}; ++ ++struct event_counter { ++ u32 count; ++ u32 flags; ++}; ++ ++struct acpi_table_slit { ++ struct acpi_table_header header; ++ u64 locality_count; ++ u8 entry[1]; ++} __attribute__((packed)); ++ ++struct acpi_table_srat { ++ struct acpi_table_header header; ++ u32 table_revision; ++ u64 reserved; ++}; ++ ++struct acpi_srat_cpu_affinity { ++ struct acpi_subtable_header header; ++ u8 proximity_domain_lo; ++ u8 apic_id; ++ u32 flags; ++ u8 local_sapic_eid; ++ u8 proximity_domain_hi[3]; ++ u32 clock_domain; ++}; ++ ++struct acpi_srat_mem_affinity { ++ struct acpi_subtable_header header; ++ u32 proximity_domain; ++ u16 reserved; ++ u64 base_address; ++ u64 length; ++ u32 reserved1; ++ u32 flags; ++ u64 reserved2; ++} __attribute__((packed)); ++ ++struct acpi_srat_x2apic_cpu_affinity { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 proximity_domain; ++ u32 apic_id; ++ u32 flags; ++ u32 clock_domain; ++ u32 reserved2; ++}; ++ ++struct acpi_lpat { ++ int temp; ++ int raw; ++}; ++ ++struct acpi_lpat_conversion_table { ++ struct acpi_lpat *lpat; ++ int lpat_count; ++}; ++ ++struct acpi_irq_parse_one_ctx { ++ int rc; ++ unsigned int index; ++ long unsigned int *res_flags; ++ struct irq_fwspec *fwspec; ++}; ++ ++enum { ++ ACPI_REFCLASS_LOCAL = 0, ++ ACPI_REFCLASS_ARG = 1, ++ ACPI_REFCLASS_REFOF = 2, ++ ACPI_REFCLASS_INDEX = 3, ++ ACPI_REFCLASS_TABLE = 4, ++ ACPI_REFCLASS_NAME = 5, ++ ACPI_REFCLASS_DEBUG = 6, ++ ACPI_REFCLASS_MAX = 6, ++}; ++ ++struct acpi_common_descriptor { ++ void *common_pointer; ++ u8 descriptor_type; ++}; ++ ++union acpi_descriptor { ++ struct acpi_common_descriptor common; ++ union acpi_operand_object object; ++ struct acpi_namespace_node node; ++ union acpi_parse_object op; ++}; ++ ++struct acpi_create_field_info { ++ struct acpi_namespace_node *region_node; ++ struct acpi_namespace_node *field_node; ++ struct acpi_namespace_node *register_node; ++ struct acpi_namespace_node 
*data_register_node; ++ struct acpi_namespace_node *connection_node; ++ u8 *resource_buffer; ++ u32 bank_value; ++ u32 field_bit_position; ++ u32 field_bit_length; ++ u16 resource_length; ++ u16 pin_number_index; ++ u8 field_flags; ++ u8 attribute; ++ u8 field_type; ++ u8 access_length; ++}; ++ ++struct acpi_init_walk_info { ++ u32 table_index; ++ u32 object_count; ++ u32 method_count; ++ u32 serial_method_count; ++ u32 non_serial_method_count; ++ u32 serialized_method_count; ++ u32 device_count; ++ u32 op_region_count; ++ u32 field_count; ++ u32 buffer_count; ++ u32 package_count; ++ u32 op_region_init; ++ u32 field_init; ++ u32 buffer_init; ++ u32 package_init; ++ acpi_owner_id owner_id; ++}; ++ ++struct acpi_name_info { ++ char name[4]; ++ u16 argument_list; ++ u8 expected_btypes; ++} __attribute__((packed)); ++ ++struct acpi_package_info { ++ u8 type; ++ u8 object_type1; ++ u8 count1; ++ u8 object_type2; ++ u8 count2; ++ u16 reserved; ++} __attribute__((packed)); ++ ++struct acpi_package_info2 { ++ u8 type; ++ u8 count; ++ u8 object_type[4]; ++ u8 reserved; ++}; ++ ++struct acpi_package_info3 { ++ u8 type; ++ u8 count; ++ u8 object_type[2]; ++ u8 tail_object_type; ++ u16 reserved; ++} __attribute__((packed)); ++ ++struct acpi_package_info4 { ++ u8 type; ++ u8 object_type1; ++ u8 count1; ++ u8 sub_object_types; ++ u8 pkg_count; ++ u16 reserved; ++} __attribute__((packed)); ++ ++union acpi_predefined_info { ++ struct acpi_name_info info; ++ struct acpi_package_info ret_info; ++ struct acpi_package_info2 ret_info2; ++ struct acpi_package_info3 ret_info3; ++ struct acpi_package_info4 ret_info4; ++}; ++ ++struct acpi_evaluate_info { ++ struct acpi_namespace_node *prefix_node; ++ const char *relative_pathname; ++ union acpi_operand_object **parameters; ++ struct acpi_namespace_node *node; ++ union acpi_operand_object *obj_desc; ++ char *full_pathname; ++ const union acpi_predefined_info *predefined; ++ union acpi_operand_object *return_object; ++ union acpi_operand_object *parent_package; ++ u32 return_flags; ++ u32 return_btype; ++ u16 param_count; ++ u8 pass_number; ++ u8 return_object_type; ++ u8 node_flags; ++ u8 flags; ++}; ++ ++enum { ++ AML_FIELD_ACCESS_ANY = 0, ++ AML_FIELD_ACCESS_BYTE = 1, ++ AML_FIELD_ACCESS_WORD = 2, ++ AML_FIELD_ACCESS_DWORD = 3, ++ AML_FIELD_ACCESS_QWORD = 4, ++ AML_FIELD_ACCESS_BUFFER = 5, ++}; ++ ++typedef enum { ++ ACPI_IMODE_LOAD_PASS1 = 1, ++ ACPI_IMODE_LOAD_PASS2 = 2, ++ ACPI_IMODE_EXECUTE = 3, ++} acpi_interpreter_mode; ++ ++typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *); ++ ++struct acpi_reg_walk_info { ++ u32 function; ++ u32 reg_run_count; ++ acpi_adr_space_type space_id; ++}; ++ ++enum { ++ AML_FIELD_ATTRIB_QUICK = 2, ++ AML_FIELD_ATTRIB_SEND_RCV = 4, ++ AML_FIELD_ATTRIB_BYTE = 6, ++ AML_FIELD_ATTRIB_WORD = 8, ++ AML_FIELD_ATTRIB_BLOCK = 10, ++ AML_FIELD_ATTRIB_MULTIBYTE = 11, ++ AML_FIELD_ATTRIB_WORD_CALL = 12, ++ AML_FIELD_ATTRIB_BLOCK_CALL = 13, ++ AML_FIELD_ATTRIB_RAW_BYTES = 14, ++ AML_FIELD_ATTRIB_RAW_PROCESS = 15, ++}; ++ ++enum { ++ AML_FIELD_UPDATE_PRESERVE = 0, ++ AML_FIELD_UPDATE_WRITE_AS_ONES = 32, ++ AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64, ++}; ++ ++struct acpi_signal_fatal_info { ++ u32 type; ++ u32 code; ++ u32 argument; ++}; ++ ++enum { ++ MATCH_MTR = 0, ++ MATCH_MEQ = 1, ++ MATCH_MLE = 2, ++ MATCH_MLT = 3, ++ MATCH_MGE = 4, ++ MATCH_MGT = 5, ++}; ++ ++typedef enum { ++ ACPI_TRACE_AML_METHOD = 0, ++ ACPI_TRACE_AML_OPCODE = 1, ++ ACPI_TRACE_AML_REGION = 2, ++} acpi_trace_event_type; ++ ++struct acpi_pci_device { ++ 
acpi_handle device; ++ struct acpi_pci_device *next; ++}; ++ ++struct acpi_port_info { ++ char *name; ++ u16 start; ++ u16 end; ++ u8 osi_dependency; ++}; ++ ++typedef acpi_status (*acpi_sleep_function)(u8); ++ ++struct acpi_sleep_functions { ++ acpi_sleep_function legacy_function; ++ acpi_sleep_function extended_function; ++}; ++ ++struct acpi_device_walk_info { ++ struct acpi_table_desc *table_desc; ++ struct acpi_evaluate_info *evaluate_info; ++ u32 device_count; ++ u32 num_STA; ++ u32 num_INI; ++}; ++ ++enum acpi_return_package_types { ++ ACPI_PTYPE1_FIXED = 1, ++ ACPI_PTYPE1_VAR = 2, ++ ACPI_PTYPE1_OPTION = 3, ++ ACPI_PTYPE2 = 4, ++ ACPI_PTYPE2_COUNT = 5, ++ ACPI_PTYPE2_PKG_COUNT = 6, ++ ACPI_PTYPE2_FIXED = 7, ++ ACPI_PTYPE2_MIN = 8, ++ ACPI_PTYPE2_REV_FIXED = 9, ++ ACPI_PTYPE2_FIX_VAR = 10, ++ ACPI_PTYPE2_VAR_VAR = 11, ++ ACPI_PTYPE2_UUID_PAIR = 12, ++ ACPI_PTYPE_CUSTOM = 13, ++}; ++ ++typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **); ++ ++struct acpi_simple_repair_info { ++ char name[4]; ++ u32 unexpected_btypes; ++ u32 package_index; ++ acpi_object_converter object_converter; ++}; ++ ++typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **); ++ ++struct acpi_repair_info { ++ char name[4]; ++ acpi_repair_function repair_function; ++}; ++ ++struct acpi_namestring_info { ++ const char *external_name; ++ const char *next_external_char; ++ char *internal_name; ++ u32 length; ++ u32 num_segments; ++ u32 num_carats; ++ u8 fully_qualified; ++}; ++ ++typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **); ++ ++struct acpi_get_devices_info { ++ acpi_walk_callback user_function; ++ void *context; ++ const char *hid; ++}; ++ ++struct aml_resource_small_header { ++ u8 descriptor_type; ++}; ++ ++struct aml_resource_irq { ++ u8 descriptor_type; ++ u16 irq_mask; ++ u8 flags; ++} __attribute__((packed)); ++ ++struct aml_resource_dma { ++ u8 descriptor_type; ++ u8 dma_channel_mask; ++ u8 flags; ++}; ++ ++struct aml_resource_start_dependent { ++ u8 descriptor_type; ++ u8 flags; ++}; ++ ++struct aml_resource_end_dependent { ++ u8 descriptor_type; ++}; ++ ++struct aml_resource_io { ++ u8 descriptor_type; ++ u8 flags; ++ u16 minimum; ++ u16 maximum; ++ u8 alignment; ++ u8 address_length; ++}; ++ ++struct aml_resource_fixed_io { ++ u8 descriptor_type; ++ u16 address; ++ u8 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_vendor_small { ++ u8 descriptor_type; ++}; ++ ++struct aml_resource_end_tag { ++ u8 descriptor_type; ++ u8 checksum; ++}; ++ ++struct aml_resource_fixed_dma { ++ u8 descriptor_type; ++ u16 request_lines; ++ u16 channels; ++ u8 width; ++} __attribute__((packed)); ++ ++struct aml_resource_large_header { ++ u8 descriptor_type; ++ u16 resource_length; ++} __attribute__((packed)); ++ ++struct aml_resource_memory24 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 flags; ++ u16 minimum; ++ u16 maximum; ++ u16 alignment; ++ u16 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_vendor_large { ++ u8 descriptor_type; ++ u16 resource_length; ++} __attribute__((packed)); ++ ++struct aml_resource_memory32 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 flags; ++ u32 minimum; ++ u32 maximum; ++ u32 alignment; ++ u32 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_fixed_memory32 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 flags; ++ u32 address; ++ u32 address_length; 
++} __attribute__((packed)); ++ ++struct aml_resource_address { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 resource_type; ++ u8 flags; ++ u8 specific_flags; ++} __attribute__((packed)); ++ ++struct aml_resource_extended_address64 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 resource_type; ++ u8 flags; ++ u8 specific_flags; ++ u8 revision_ID; ++ u8 reserved; ++ u64 granularity; ++ u64 minimum; ++ u64 maximum; ++ u64 translation_offset; ++ u64 address_length; ++ u64 type_specific; ++} __attribute__((packed)); ++ ++struct aml_resource_address64 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 resource_type; ++ u8 flags; ++ u8 specific_flags; ++ u64 granularity; ++ u64 minimum; ++ u64 maximum; ++ u64 translation_offset; ++ u64 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_address32 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 resource_type; ++ u8 flags; ++ u8 specific_flags; ++ u32 granularity; ++ u32 minimum; ++ u32 maximum; ++ u32 translation_offset; ++ u32 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_address16 { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 resource_type; ++ u8 flags; ++ u8 specific_flags; ++ u16 granularity; ++ u16 minimum; ++ u16 maximum; ++ u16 translation_offset; ++ u16 address_length; ++} __attribute__((packed)); ++ ++struct aml_resource_extended_irq { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 flags; ++ u8 interrupt_count; ++ u32 interrupts[1]; ++} __attribute__((packed)); ++ ++struct aml_resource_generic_register { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 address_space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 access_size; ++ u64 address; ++} __attribute__((packed)); ++ ++struct aml_resource_gpio { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u8 connection_type; ++ u16 flags; ++ u16 int_flags; ++ u8 pin_config; ++ u16 drive_strength; ++ u16 debounce_timeout; ++ u16 pin_table_offset; ++ u8 res_source_index; ++ u16 res_source_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++struct aml_resource_common_serialbus { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u8 res_source_index; ++ u8 type; ++ u8 flags; ++ u16 type_specific_flags; ++ u8 type_revision_id; ++ u16 type_data_length; ++} __attribute__((packed)); ++ ++struct aml_resource_i2c_serialbus { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u8 res_source_index; ++ u8 type; ++ u8 flags; ++ u16 type_specific_flags; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u32 connection_speed; ++ u16 slave_address; ++} __attribute__((packed)); ++ ++struct aml_resource_spi_serialbus { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u8 res_source_index; ++ u8 type; ++ u8 flags; ++ u16 type_specific_flags; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u32 connection_speed; ++ u8 data_bit_length; ++ u8 clock_phase; ++ u8 clock_polarity; ++ u16 device_selection; ++} __attribute__((packed)); ++ ++struct aml_resource_uart_serialbus { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u8 res_source_index; ++ u8 type; ++ u8 flags; ++ u16 type_specific_flags; ++ u8 type_revision_id; ++ u16 type_data_length; ++ u32 default_baud_rate; ++ u16 rx_fifo_size; ++ u16 tx_fifo_size; ++ u8 parity; ++ u8 lines_enabled; ++} __attribute__((packed)); ++ ++struct aml_resource_pin_function { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u16 flags; ++ u8 pin_config; 
++ u16 function_number; ++ u16 pin_table_offset; ++ u8 res_source_index; ++ u16 res_source_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++struct aml_resource_pin_config { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u16 flags; ++ u8 pin_config_type; ++ u32 pin_config_value; ++ u16 pin_table_offset; ++ u8 res_source_index; ++ u16 res_source_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++struct aml_resource_pin_group { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u16 flags; ++ u16 pin_table_offset; ++ u16 label_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++struct aml_resource_pin_group_function { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u16 flags; ++ u16 function_number; ++ u8 res_source_index; ++ u16 res_source_offset; ++ u16 res_source_label_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++struct aml_resource_pin_group_config { ++ u8 descriptor_type; ++ u16 resource_length; ++ u8 revision_id; ++ u16 flags; ++ u8 pin_config_type; ++ u32 pin_config_value; ++ u8 res_source_index; ++ u16 res_source_offset; ++ u16 res_source_label_offset; ++ u16 vendor_offset; ++ u16 vendor_length; ++} __attribute__((packed)); ++ ++union aml_resource { ++ u8 descriptor_type; ++ struct aml_resource_small_header small_header; ++ struct aml_resource_large_header large_header; ++ struct aml_resource_irq irq; ++ struct aml_resource_dma dma; ++ struct aml_resource_start_dependent start_dpf; ++ struct aml_resource_end_dependent end_dpf; ++ struct aml_resource_io io; ++ struct aml_resource_fixed_io fixed_io; ++ struct aml_resource_fixed_dma fixed_dma; ++ struct aml_resource_vendor_small vendor_small; ++ struct aml_resource_end_tag end_tag; ++ struct aml_resource_memory24 memory24; ++ struct aml_resource_generic_register generic_reg; ++ struct aml_resource_vendor_large vendor_large; ++ struct aml_resource_memory32 memory32; ++ struct aml_resource_fixed_memory32 fixed_memory32; ++ struct aml_resource_address16 address16; ++ struct aml_resource_address32 address32; ++ struct aml_resource_address64 address64; ++ struct aml_resource_extended_address64 ext_address64; ++ struct aml_resource_extended_irq extended_irq; ++ struct aml_resource_gpio gpio; ++ struct aml_resource_i2c_serialbus i2c_serial_bus; ++ struct aml_resource_spi_serialbus spi_serial_bus; ++ struct aml_resource_uart_serialbus uart_serial_bus; ++ struct aml_resource_common_serialbus common_serial_bus; ++ struct aml_resource_pin_function pin_function; ++ struct aml_resource_pin_config pin_config; ++ struct aml_resource_pin_group pin_group; ++ struct aml_resource_pin_group_function pin_group_function; ++ struct aml_resource_pin_group_config pin_group_config; ++ struct aml_resource_address address; ++ u32 dword_item; ++ u16 word_item; ++ u8 byte_item; ++}; ++ ++struct acpi_rsconvert_info { ++ u8 opcode; ++ u8 resource_offset; ++ u8 aml_offset; ++ u8 value; ++}; ++ ++enum { ++ ACPI_RSC_INITGET = 0, ++ ACPI_RSC_INITSET = 1, ++ ACPI_RSC_FLAGINIT = 2, ++ ACPI_RSC_1BITFLAG = 3, ++ ACPI_RSC_2BITFLAG = 4, ++ ACPI_RSC_3BITFLAG = 5, ++ ACPI_RSC_ADDRESS = 6, ++ ACPI_RSC_BITMASK = 7, ++ ACPI_RSC_BITMASK16 = 8, ++ ACPI_RSC_COUNT = 9, ++ ACPI_RSC_COUNT16 = 10, ++ ACPI_RSC_COUNT_GPIO_PIN = 11, ++ ACPI_RSC_COUNT_GPIO_RES = 12, ++ ACPI_RSC_COUNT_GPIO_VEN = 13, ++ ACPI_RSC_COUNT_SERIAL_RES = 14, ++ ACPI_RSC_COUNT_SERIAL_VEN = 15, ++ ACPI_RSC_DATA8 = 16, 
++ ACPI_RSC_EXIT_EQ = 17, ++ ACPI_RSC_EXIT_LE = 18, ++ ACPI_RSC_EXIT_NE = 19, ++ ACPI_RSC_LENGTH = 20, ++ ACPI_RSC_MOVE_GPIO_PIN = 21, ++ ACPI_RSC_MOVE_GPIO_RES = 22, ++ ACPI_RSC_MOVE_SERIAL_RES = 23, ++ ACPI_RSC_MOVE_SERIAL_VEN = 24, ++ ACPI_RSC_MOVE8 = 25, ++ ACPI_RSC_MOVE16 = 26, ++ ACPI_RSC_MOVE32 = 27, ++ ACPI_RSC_MOVE64 = 28, ++ ACPI_RSC_SET8 = 29, ++ ACPI_RSC_SOURCE = 30, ++ ACPI_RSC_SOURCEX = 31, ++}; ++ ++typedef u16 acpi_rs_length; ++ ++typedef u32 acpi_rsdesc_size; ++ ++struct acpi_vendor_uuid { ++ u8 subtype; ++ u8 data[16]; ++}; ++ ++typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *); ++ ++struct acpi_vendor_walk_info { ++ struct acpi_vendor_uuid *uuid; ++ struct acpi_buffer *buffer; ++ acpi_status status; ++}; ++ ++struct acpi_fadt_info { ++ const char *name; ++ u16 address64; ++ u16 address32; ++ u16 length; ++ u8 default_length; ++ u8 flags; ++}; ++ ++struct acpi_fadt_pm_info { ++ struct acpi_generic_address *target; ++ u16 source; ++ u8 register_num; ++}; ++ ++struct acpi_table_rsdp { ++ char signature[8]; ++ u8 checksum; ++ char oem_id[6]; ++ u8 revision; ++ u32 rsdt_physical_address; ++ u32 length; ++ u64 xsdt_physical_address; ++ u8 extended_checksum; ++ u8 reserved[3]; ++} __attribute__((packed)); ++ ++struct acpi_pkg_info { ++ u8 *free_space; ++ acpi_size length; ++ u32 object_space; ++ u32 num_packages; ++}; ++ ++struct acpi_exception_info { ++ char *name; ++}; ++ ++typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *); ++ ++typedef u32 acpi_mutex_handle; ++ ++typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **); ++ ++struct acpi_table_mcfg { ++ struct acpi_table_header header; ++ u8 reserved[8]; ++}; ++ ++struct acpi_mcfg_allocation { ++ u64 address; ++ u16 pci_segment; ++ u8 start_bus_number; ++ u8 end_bus_number; ++ u32 reserved; ++}; ++ ++struct mcfg_entry { ++ struct list_head list; ++ phys_addr_t addr; ++ u16 segment; ++ u8 bus_start; ++ u8 bus_end; ++}; ++ ++struct mcfg_fixup { ++ char oem_id[7]; ++ char oem_table_id[9]; ++ u32 oem_revision; ++ u16 segment; ++ struct resource bus_range; ++ struct pci_ecam_ops *ops; ++ struct resource cfgres; ++}; ++ ++struct input_id { ++ __u16 bustype; ++ __u16 vendor; ++ __u16 product; ++ __u16 version; ++}; ++ ++struct input_absinfo { ++ __s32 value; ++ __s32 minimum; ++ __s32 maximum; ++ __s32 fuzz; ++ __s32 flat; ++ __s32 resolution; ++}; ++ ++struct input_keymap_entry { ++ __u8 flags; ++ __u8 len; ++ __u16 index; ++ __u32 keycode; ++ __u8 scancode[32]; ++}; ++ ++struct ff_replay { ++ __u16 length; ++ __u16 delay; ++}; ++ ++struct ff_trigger { ++ __u16 button; ++ __u16 interval; ++}; ++ ++struct ff_envelope { ++ __u16 attack_length; ++ __u16 attack_level; ++ __u16 fade_length; ++ __u16 fade_level; ++}; ++ ++struct ff_constant_effect { ++ __s16 level; ++ struct ff_envelope envelope; ++}; ++ ++struct ff_ramp_effect { ++ __s16 start_level; ++ __s16 end_level; ++ struct ff_envelope envelope; ++}; ++ ++struct ff_condition_effect { ++ __u16 right_saturation; ++ __u16 left_saturation; ++ __s16 right_coeff; ++ __s16 left_coeff; ++ __u16 deadband; ++ __s16 center; ++}; ++ ++struct ff_periodic_effect { ++ __u16 waveform; ++ __u16 period; ++ __s16 magnitude; ++ __s16 offset; ++ __u16 phase; ++ struct ff_envelope envelope; ++ __u32 custom_len; ++ __s16 *custom_data; ++}; ++ ++struct ff_rumble_effect { ++ __u16 strong_magnitude; ++ __u16 weak_magnitude; ++}; ++ ++struct ff_effect { ++ __u16 type; ++ __s16 id; ++ __u16 
direction; ++ struct ff_trigger trigger; ++ struct ff_replay replay; ++ union { ++ struct ff_constant_effect constant; ++ struct ff_ramp_effect ramp; ++ struct ff_periodic_effect periodic; ++ struct ff_condition_effect condition[2]; ++ struct ff_rumble_effect rumble; ++ } u; ++}; ++ ++struct input_device_id { ++ kernel_ulong_t flags; ++ __u16 bustype; ++ __u16 vendor; ++ __u16 product; ++ __u16 version; ++ kernel_ulong_t evbit[1]; ++ kernel_ulong_t keybit[12]; ++ kernel_ulong_t relbit[1]; ++ kernel_ulong_t absbit[1]; ++ kernel_ulong_t mscbit[1]; ++ kernel_ulong_t ledbit[1]; ++ kernel_ulong_t sndbit[1]; ++ kernel_ulong_t ffbit[2]; ++ kernel_ulong_t swbit[1]; ++ kernel_ulong_t propbit[1]; ++ kernel_ulong_t driver_info; ++}; ++ ++struct input_value { ++ __u16 type; ++ __u16 code; ++ __s32 value; ++}; ++ ++struct ff_device; ++ ++struct input_mt; ++ ++struct input_handle; ++ ++struct input_dev { ++ const char *name; ++ const char *phys; ++ const char *uniq; ++ struct input_id id; ++ long unsigned int propbit[1]; ++ long unsigned int evbit[1]; ++ long unsigned int keybit[12]; ++ long unsigned int relbit[1]; ++ long unsigned int absbit[1]; ++ long unsigned int mscbit[1]; ++ long unsigned int ledbit[1]; ++ long unsigned int sndbit[1]; ++ long unsigned int ffbit[2]; ++ long unsigned int swbit[1]; ++ unsigned int hint_events_per_packet; ++ unsigned int keycodemax; ++ unsigned int keycodesize; ++ void *keycode; ++ int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); ++ int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); ++ struct ff_device *ff; ++ unsigned int repeat_key; ++ struct timer_list timer; ++ int rep[2]; ++ struct input_mt *mt; ++ struct input_absinfo *absinfo; ++ long unsigned int key[12]; ++ long unsigned int led[1]; ++ long unsigned int snd[1]; ++ long unsigned int sw[1]; ++ int (*open)(struct input_dev *); ++ void (*close)(struct input_dev *); ++ int (*flush)(struct input_dev *, struct file *); ++ int (*event)(struct input_dev *, unsigned int, unsigned int, int); ++ struct input_handle *grab; ++ spinlock_t event_lock; ++ struct mutex mutex; ++ unsigned int users; ++ bool going_away; ++ struct device dev; ++ struct list_head h_list; ++ struct list_head node; ++ unsigned int num_vals; ++ unsigned int max_vals; ++ struct input_value *vals; ++ bool devres_managed; ++}; ++ ++struct ff_device { ++ int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); ++ int (*erase)(struct input_dev *, int); ++ int (*playback)(struct input_dev *, int, int); ++ void (*set_gain)(struct input_dev *, u16); ++ void (*set_autocenter)(struct input_dev *, u16); ++ void (*destroy)(struct ff_device *); ++ void *private; ++ long unsigned int ffbit[2]; ++ struct mutex mutex; ++ int max_effects; ++ struct ff_effect *effects; ++ struct file *effect_owners[0]; ++}; ++ ++struct input_handler; ++ ++struct input_handle { ++ void *private; ++ int open; ++ const char *name; ++ struct input_dev *dev; ++ struct input_handler *handler; ++ struct list_head d_node; ++ struct list_head h_node; ++}; ++ ++struct input_handler { ++ void *private; ++ void (*event)(struct input_handle *, unsigned int, unsigned int, int); ++ void (*events)(struct input_handle *, const struct input_value *, unsigned int); ++ bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); ++ bool (*match)(struct input_handler *, struct input_dev *); ++ int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); ++ void (*disconnect)(struct 
input_handle *); ++ void (*start)(struct input_handle *); ++ bool legacy_minors; ++ int minor; ++ const char *name; ++ const struct input_device_id *id_table; ++ struct list_head h_list; ++ struct list_head node; ++}; ++ ++struct acpi_button { ++ unsigned int type; ++ struct input_dev *input; ++ char phys[32]; ++ long unsigned int pushed; ++ int last_state; ++ ktime_t last_time; ++ bool suspended; ++}; ++ ++struct acpi_fan_fps { ++ u64 control; ++ u64 trip_point; ++ u64 speed; ++ u64 noise_level; ++ u64 power; ++}; ++ ++struct acpi_fan_fif { ++ u64 revision; ++ u64 fine_grain_ctrl; ++ u64 step_size; ++ u64 low_speed_notification; ++}; ++ ++struct acpi_fan { ++ bool acpi4; ++ struct acpi_fan_fif fif; ++ struct acpi_fan_fps *fps; ++ int fps_count; ++ struct thermal_cooling_device *cdev; ++}; ++ ++struct acpi_pci_slot { ++ struct pci_slot *pci_slot; ++ struct list_head list; ++}; ++ ++struct acpi_power_register { ++ u8 descriptor; ++ u16 length; ++ u8 space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 access_size; ++ u64 address; ++} __attribute__((packed)); ++ ++struct acpi_lpi_states_array { ++ unsigned int size; ++ unsigned int composite_states_size; ++ struct acpi_lpi_state *entries; ++ struct acpi_lpi_state *composite_states[8]; ++}; ++ ++struct container_dev { ++ struct device dev; ++ int (*offline)(struct container_dev *); ++}; ++ ++struct acpi_thermal_state { ++ u8 critical: 1; ++ u8 hot: 1; ++ u8 passive: 1; ++ u8 active: 1; ++ u8 reserved: 4; ++ int active_index; ++}; ++ ++struct acpi_thermal_state_flags { ++ u8 valid: 1; ++ u8 enabled: 1; ++ u8 reserved: 6; ++}; ++ ++struct acpi_thermal_critical { ++ struct acpi_thermal_state_flags flags; ++ long unsigned int temperature; ++}; ++ ++struct acpi_thermal_hot { ++ struct acpi_thermal_state_flags flags; ++ long unsigned int temperature; ++}; ++ ++struct acpi_thermal_passive { ++ struct acpi_thermal_state_flags flags; ++ long unsigned int temperature; ++ long unsigned int tc1; ++ long unsigned int tc2; ++ long unsigned int tsp; ++ struct acpi_handle_list devices; ++}; ++ ++struct acpi_thermal_active { ++ struct acpi_thermal_state_flags flags; ++ long unsigned int temperature; ++ struct acpi_handle_list devices; ++}; ++ ++struct acpi_thermal_trips { ++ struct acpi_thermal_critical critical; ++ struct acpi_thermal_hot hot; ++ struct acpi_thermal_passive passive; ++ struct acpi_thermal_active active[10]; ++}; ++ ++struct acpi_thermal_flags { ++ u8 cooling_mode: 1; ++ u8 devices: 1; ++ u8 reserved: 6; ++}; ++ ++struct acpi_thermal { ++ struct acpi_device *device; ++ acpi_bus_id name; ++ long unsigned int temperature; ++ long unsigned int last_temperature; ++ long unsigned int polling_frequency; ++ volatile u8 zombie; ++ struct acpi_thermal_flags flags; ++ struct acpi_thermal_state state; ++ struct acpi_thermal_trips trips; ++ struct acpi_handle_list devices; ++ struct thermal_zone_device *thermal_zone; ++ int tz_enabled; ++ int kelvin_offset; ++ struct work_struct thermal_check_work; ++}; ++ ++struct acpi_memory_info { ++ struct list_head list; ++ u64 start_addr; ++ u64 length; ++ short unsigned int caching; ++ short unsigned int write_protect; ++ unsigned int enabled: 1; ++}; ++ ++struct acpi_memory_device { ++ struct acpi_device *device; ++ unsigned int state; ++ struct list_head res_list; ++}; ++ ++struct acpi_pcct_hw_reduced { ++ struct acpi_subtable_header header; ++ u32 platform_interrupt; ++ u8 flags; ++ u8 reserved; ++ u64 base_address; ++ u64 length; ++ struct acpi_generic_address doorbell_register; ++ u64 preserve_mask; ++ u64 
write_mask; ++ u32 latency; ++ u32 max_access_rate; ++ u16 min_turnaround_time; ++} __attribute__((packed)); ++ ++struct acpi_pcct_shared_memory { ++ u32 signature; ++ u16 command; ++ u16 status; ++}; ++ ++struct mbox_chan; ++ ++struct mbox_chan_ops { ++ int (*send_data)(struct mbox_chan *, void *); ++ int (*startup)(struct mbox_chan *); ++ void (*shutdown)(struct mbox_chan *); ++ bool (*last_tx_done)(struct mbox_chan *); ++ bool (*peek_data)(struct mbox_chan *); ++}; ++ ++struct mbox_controller; ++ ++struct mbox_client; ++ ++struct mbox_chan { ++ struct mbox_controller *mbox; ++ unsigned int txdone_method; ++ struct mbox_client *cl; ++ struct completion tx_complete; ++ void *active_req; ++ unsigned int msg_count; ++ unsigned int msg_free; ++ void *msg_data[20]; ++ spinlock_t lock; ++ void *con_priv; ++}; ++ ++struct mbox_controller { ++ struct device *dev; ++ const struct mbox_chan_ops *ops; ++ struct mbox_chan *chans; ++ int num_chans; ++ bool txdone_irq; ++ bool txdone_poll; ++ unsigned int txpoll_period; ++ struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); ++ struct hrtimer poll_hrt; ++ struct list_head node; ++}; ++ ++struct mbox_client { ++ struct device *dev; ++ bool tx_block; ++ long unsigned int tx_tout; ++ bool knows_txdone; ++ void (*rx_callback)(struct mbox_client *, void *); ++ void (*tx_prepare)(struct mbox_client *, void *); ++ void (*tx_done)(struct mbox_client *, void *, int); ++}; ++ ++struct cpc_reg { ++ u8 descriptor; ++ u16 length; ++ u8 space_id; ++ u8 bit_width; ++ u8 bit_offset; ++ u8 access_width; ++ u64 address; ++} __attribute__((packed)); ++ ++struct cpc_register_resource { ++ acpi_object_type type; ++ u64 *sys_mem_vaddr; ++ union { ++ struct cpc_reg reg; ++ u64 int_value; ++ } cpc_entry; ++}; ++ ++struct cpc_desc { ++ int num_entries; ++ int version; ++ int cpu_id; ++ int write_cmd_status; ++ int write_cmd_id; ++ struct cpc_register_resource cpc_regs[21]; ++ struct acpi_psd_package domain_info; ++ struct kobject kobj; ++}; ++ ++enum cppc_regs { ++ HIGHEST_PERF = 0, ++ NOMINAL_PERF = 1, ++ LOW_NON_LINEAR_PERF = 2, ++ LOWEST_PERF = 3, ++ GUARANTEED_PERF = 4, ++ DESIRED_PERF = 5, ++ MIN_PERF = 6, ++ MAX_PERF = 7, ++ PERF_REDUC_TOLERANCE = 8, ++ TIME_WINDOW = 9, ++ CTR_WRAP_TIME = 10, ++ REFERENCE_CTR = 11, ++ DELIVERED_CTR = 12, ++ PERF_LIMITED = 13, ++ ENABLE = 14, ++ AUTO_SEL_ENABLE = 15, ++ AUTO_ACT_WINDOW = 16, ++ ENERGY_PERF = 17, ++ REFERENCE_PERF = 18, ++ LOWEST_FREQ = 19, ++ NOMINAL_FREQ = 20, ++}; ++ ++struct cppc_perf_caps { ++ u32 highest_perf; ++ u32 nominal_perf; ++ u32 lowest_perf; ++ u32 lowest_nonlinear_perf; ++ u32 lowest_freq; ++ u32 nominal_freq; ++}; ++ ++struct cppc_perf_ctrls { ++ u32 max_perf; ++ u32 min_perf; ++ u32 desired_perf; ++}; ++ ++struct cppc_perf_fb_ctrs { ++ u64 reference; ++ u64 delivered; ++ u64 reference_perf; ++ u64 wraparound_time; ++}; ++ ++struct cppc_cpudata { ++ int cpu; ++ struct cppc_perf_caps perf_caps; ++ struct cppc_perf_ctrls perf_ctrls; ++ struct cppc_perf_fb_ctrs perf_fb_ctrs; ++ struct cpufreq_policy *cur_policy; ++ unsigned int shared_type; ++ cpumask_var_t shared_cpu_map; ++}; ++ ++struct cppc_pcc_data { ++ struct mbox_chan *pcc_channel; ++ void *pcc_comm_addr; ++ bool pcc_channel_acquired; ++ unsigned int deadline_us; ++ unsigned int pcc_mpar; ++ unsigned int pcc_mrtt; ++ unsigned int pcc_nominal; ++ bool pending_pcc_write_cmd; ++ bool platform_owns_pcc; ++ unsigned int pcc_write_cnt; ++ struct rw_semaphore pcc_lock; ++ wait_queue_head_t pcc_write_wait_q; ++ ktime_t 
last_cmd_cmpl_time; ++ ktime_t last_mpar_reset; ++ int mpar_count; ++ int refcount; ++}; ++ ++struct cppc_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct kobject *, struct attribute *, char *); ++ ssize_t (*store)(struct kobject *, struct attribute *, const char *, ssize_t); ++}; ++ ++enum acpi_pptt_type { ++ ACPI_PPTT_TYPE_PROCESSOR = 0, ++ ACPI_PPTT_TYPE_CACHE = 1, ++ ACPI_PPTT_TYPE_ID = 2, ++ ACPI_PPTT_TYPE_RESERVED = 3, ++}; ++ ++struct acpi_pptt_processor { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 flags; ++ u32 parent; ++ u32 acpi_processor_id; ++ u32 number_of_priv_resources; ++}; ++ ++struct acpi_pptt_cache { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 flags; ++ u32 next_level_of_cache; ++ u32 size; ++ u32 number_of_sets; ++ u8 associativity; ++ u8 attributes; ++ u16 line_size; ++}; ++ ++struct acpi_whea_header { ++ u8 action; ++ u8 instruction; ++ u8 flags; ++ u8 reserved; ++ struct acpi_generic_address register_region; ++ u64 value; ++ u64 mask; ++} __attribute__((packed)); ++ ++struct apei_exec_context; ++ ++typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *, struct acpi_whea_header *); ++ ++struct apei_exec_ins_type; ++ ++struct apei_exec_context { ++ u32 ip; ++ u64 value; ++ u64 var1; ++ u64 var2; ++ u64 src_base; ++ u64 dst_base; ++ struct apei_exec_ins_type *ins_table; ++ u32 instructions; ++ struct acpi_whea_header *action_table; ++ u32 entries; ++}; ++ ++struct apei_exec_ins_type { ++ u32 flags; ++ apei_exec_ins_func_t run; ++}; ++ ++struct apei_resources { ++ struct list_head iomem; ++ struct list_head ioport; ++}; ++ ++typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *, struct acpi_whea_header *, void *); ++ ++struct apei_res { ++ struct list_head list; ++ long unsigned int start; ++ long unsigned int end; ++}; ++ ++struct acpi_table_hest { ++ struct acpi_table_header header; ++ u32 error_source_count; ++}; ++ ++struct acpi_hest_ia_machine_check { ++ struct acpi_hest_header header; ++ u16 reserved1; ++ u8 flags; ++ u8 enabled; ++ u32 records_to_preallocate; ++ u32 max_sections_per_record; ++ u64 global_capability_data; ++ u64 global_control_data; ++ u8 num_hardware_banks; ++ u8 reserved3[7]; ++}; ++ ++struct acpi_hest_ia_corrected { ++ struct acpi_hest_header header; ++ u16 reserved1; ++ u8 flags; ++ u8 enabled; ++ u32 records_to_preallocate; ++ u32 max_sections_per_record; ++ struct acpi_hest_notify notify; ++ u8 num_hardware_banks; ++ u8 reserved2[3]; ++}; ++ ++enum hest_status { ++ HEST_ENABLED = 0, ++ HEST_DISABLED = 1, ++ HEST_NOT_FOUND = 2, ++}; ++ ++typedef int (*apei_hest_func_t)(struct acpi_hest_header *, void *); ++ ++struct ghes_arr { ++ struct platform_device **ghes_devs; ++ unsigned int count; ++}; ++ ++struct acpi_table_erst { ++ struct acpi_table_header header; ++ u32 header_length; ++ u32 reserved; ++ u32 entries; ++}; ++ ++enum acpi_erst_actions { ++ ACPI_ERST_BEGIN_WRITE = 0, ++ ACPI_ERST_BEGIN_READ = 1, ++ ACPI_ERST_BEGIN_CLEAR = 2, ++ ACPI_ERST_END = 3, ++ ACPI_ERST_SET_RECORD_OFFSET = 4, ++ ACPI_ERST_EXECUTE_OPERATION = 5, ++ ACPI_ERST_CHECK_BUSY_STATUS = 6, ++ ACPI_ERST_GET_COMMAND_STATUS = 7, ++ ACPI_ERST_GET_RECORD_ID = 8, ++ ACPI_ERST_SET_RECORD_ID = 9, ++ ACPI_ERST_GET_RECORD_COUNT = 10, ++ ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, ++ ACPI_ERST_NOT_USED = 12, ++ ACPI_ERST_GET_ERROR_RANGE = 13, ++ ACPI_ERST_GET_ERROR_LENGTH = 14, ++ ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, ++ ACPI_ERST_EXECUTE_TIMINGS = 16, ++ ACPI_ERST_ACTION_RESERVED = 17, ++}; ++ ++enum acpi_erst_instructions { 
++ ACPI_ERST_READ_REGISTER = 0, ++ ACPI_ERST_READ_REGISTER_VALUE = 1, ++ ACPI_ERST_WRITE_REGISTER = 2, ++ ACPI_ERST_WRITE_REGISTER_VALUE = 3, ++ ACPI_ERST_NOOP = 4, ++ ACPI_ERST_LOAD_VAR1 = 5, ++ ACPI_ERST_LOAD_VAR2 = 6, ++ ACPI_ERST_STORE_VAR1 = 7, ++ ACPI_ERST_ADD = 8, ++ ACPI_ERST_SUBTRACT = 9, ++ ACPI_ERST_ADD_VALUE = 10, ++ ACPI_ERST_SUBTRACT_VALUE = 11, ++ ACPI_ERST_STALL = 12, ++ ACPI_ERST_STALL_WHILE_TRUE = 13, ++ ACPI_ERST_SKIP_NEXT_IF_TRUE = 14, ++ ACPI_ERST_GOTO = 15, ++ ACPI_ERST_SET_SRC_ADDRESS_BASE = 16, ++ ACPI_ERST_SET_DST_ADDRESS_BASE = 17, ++ ACPI_ERST_MOVE_DATA = 18, ++ ACPI_ERST_INSTRUCTION_RESERVED = 19, ++}; ++ ++struct cper_record_header { ++ char signature[4]; ++ __u16 revision; ++ __u32 signature_end; ++ __u16 section_count; ++ __u32 error_severity; ++ __u32 validation_bits; ++ __u32 record_length; ++ __u64 timestamp; ++ guid_t platform_id; ++ guid_t partition_id; ++ guid_t creator_id; ++ guid_t notification_type; ++ __u64 record_id; ++ __u32 flags; ++ __u64 persistence_information; ++ __u8 reserved[12]; ++} __attribute__((packed)); ++ ++struct cper_section_descriptor { ++ __u32 section_offset; ++ __u32 section_length; ++ __u16 revision; ++ __u8 validation_bits; ++ __u8 reserved; ++ __u32 flags; ++ guid_t section_type; ++ guid_t fru_id; ++ __u32 section_severity; ++ __u8 fru_text[20]; ++}; ++ ++struct erst_erange { ++ u64 base; ++ u64 size; ++ void *vaddr; ++ u32 attr; ++}; ++ ++struct erst_record_id_cache { ++ struct mutex lock; ++ u64 *entries; ++ int len; ++ int size; ++ int refcount; ++}; ++ ++struct cper_pstore_record { ++ struct cper_record_header hdr; ++ struct cper_section_descriptor sec_hdr; ++ char data[0]; ++}; ++ ++struct acpi_bert_region { ++ u32 block_status; ++ u32 raw_data_offset; ++ u32 raw_data_length; ++ u32 data_length; ++ u32 error_severity; ++}; ++ ++struct acpi_hest_generic_data { ++ u8 section_type[16]; ++ u32 error_severity; ++ u16 revision; ++ u8 validation_bits; ++ u8 flags; ++ u32 error_data_length; ++ u8 fru_id[16]; ++ u8 fru_text[20]; ++}; ++ ++struct acpi_hest_generic_data_v300 { ++ u8 section_type[16]; ++ u32 error_severity; ++ u16 revision; ++ u8 validation_bits; ++ u8 flags; ++ u32 error_data_length; ++ u8 fru_id[16]; ++ u8 fru_text[20]; ++ u64 time_stamp; ++}; ++ ++struct cper_sec_pcie { ++ __u64 validation_bits; ++ __u32 port_type; ++ struct { ++ __u8 minor; ++ __u8 major; ++ __u8 reserved[2]; ++ } version; ++ __u16 command; ++ __u16 status; ++ __u32 reserved; ++ struct { ++ __u16 vendor_id; ++ __u16 device_id; ++ __u8 class_code[3]; ++ __u8 function; ++ __u8 device; ++ __u16 segment; ++ __u8 bus; ++ __u8 secondary_bus; ++ __u16 slot; ++ __u8 reserved; ++ } __attribute__((packed)) device_id; ++ struct { ++ __u32 lower; ++ __u32 upper; ++ } serial_number; ++ struct { ++ __u16 secondary_status; ++ __u16 control; ++ } bridge; ++ __u8 capability[60]; ++ __u8 aer_info[96]; ++}; ++ ++struct ghes_estatus_cache { ++ u32 estatus_len; ++ atomic_t count; ++ struct acpi_hest_generic *generic; ++ long long unsigned int time_in; ++ struct callback_head rcu; ++}; ++ ++enum { ++ GHES_SEV_NO = 0, ++ GHES_SEV_CORRECTED = 1, ++ GHES_SEV_RECOVERABLE = 2, ++ GHES_SEV_PANIC = 3, ++}; ++ ++struct acpi_table_iort { ++ struct acpi_table_header header; ++ u32 node_count; ++ u32 node_offset; ++ u32 reserved; ++}; ++ ++struct acpi_iort_node { ++ u8 type; ++ u16 length; ++ u8 revision; ++ u32 reserved; ++ u32 mapping_count; ++ u32 mapping_offset; ++ char node_data[1]; ++} __attribute__((packed)); ++ ++enum acpi_iort_node_type { ++ ACPI_IORT_NODE_ITS_GROUP = 
0, ++ ACPI_IORT_NODE_NAMED_COMPONENT = 1, ++ ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 2, ++ ACPI_IORT_NODE_SMMU = 3, ++ ACPI_IORT_NODE_SMMU_V3 = 4, ++ ACPI_IORT_NODE_PMCG = 5, ++}; ++ ++struct acpi_iort_id_mapping { ++ u32 input_base; ++ u32 id_count; ++ u32 output_base; ++ u32 output_reference; ++ u32 flags; ++}; ++ ++struct acpi_iort_its_group { ++ u32 its_count; ++ u32 identifiers[1]; ++}; ++ ++struct acpi_iort_named_component { ++ u32 node_flags; ++ u64 memory_properties; ++ u8 memory_address_limit; ++ char device_name[1]; ++} __attribute__((packed)); ++ ++struct acpi_iort_root_complex { ++ u64 memory_properties; ++ u32 ats_attribute; ++ u32 pci_segment_number; ++ u8 memory_address_limit; ++ u8 reserved[3]; ++} __attribute__((packed)); ++ ++struct acpi_iort_smmu { ++ u64 base_address; ++ u64 span; ++ u32 model; ++ u32 flags; ++ u32 global_interrupt_offset; ++ u32 context_interrupt_count; ++ u32 context_interrupt_offset; ++ u32 pmu_interrupt_count; ++ u32 pmu_interrupt_offset; ++ u64 interrupts[1]; ++} __attribute__((packed)); ++ ++struct acpi_iort_smmu_v3 { ++ u64 base_address; ++ u32 flags; ++ u32 reserved; ++ u64 vatos_address; ++ u32 model; ++ u32 event_gsiv; ++ u32 pri_gsiv; ++ u32 gerr_gsiv; ++ u32 sync_gsiv; ++ u32 pxm; ++ u32 id_mapping_index; ++} __attribute__((packed)); ++ ++struct acpi_iort_pmcg { ++ u64 page0_base_address; ++ u32 overflow_gsiv; ++ u32 node_reference; ++ u64 page1_base_address; ++}; ++ ++struct iort_its_msi_chip { ++ struct list_head list; ++ struct fwnode_handle *fw_node; ++ phys_addr_t base_addr; ++ u32 translation_id; ++}; ++ ++struct iort_fwnode { ++ struct list_head list; ++ struct acpi_iort_node *iort_node; ++ struct fwnode_handle *fwnode; ++}; ++ ++typedef acpi_status (*iort_find_node_callback)(struct acpi_iort_node *, void *); ++ ++struct iort_pci_alias_info { ++ struct device *dev; ++ struct acpi_iort_node *node; ++}; ++ ++struct iort_dev_config { ++ const char *name; ++ int (*dev_init)(struct acpi_iort_node *); ++ void (*dev_dma_configure)(struct device *, struct acpi_iort_node *); ++ int (*dev_count_resources)(struct acpi_iort_node *); ++ void (*dev_init_resources)(struct resource *, struct acpi_iort_node *); ++ int (*dev_set_proximity)(struct device *, struct acpi_iort_node *); ++ int (*dev_add_platdata)(struct platform_device *); ++}; ++ ++enum arch_timer_ppi_nr { ++ ARCH_TIMER_PHYS_SECURE_PPI = 0, ++ ARCH_TIMER_PHYS_NONSECURE_PPI = 1, ++ ARCH_TIMER_VIRT_PPI = 2, ++ ARCH_TIMER_HYP_PPI = 3, ++ ARCH_TIMER_MAX_TIMER_PPI = 4, ++}; ++ ++struct arch_timer_mem_frame { ++ bool valid; ++ phys_addr_t cntbase; ++ size_t size; ++ int phys_irq; ++ int virt_irq; ++}; ++ ++struct arch_timer_mem { ++ phys_addr_t cntctlbase; ++ size_t size; ++ struct arch_timer_mem_frame frame[8]; ++}; ++ ++struct acpi_table_gtdt { ++ struct acpi_table_header header; ++ u64 counter_block_addresss; ++ u32 reserved; ++ u32 secure_el1_interrupt; ++ u32 secure_el1_flags; ++ u32 non_secure_el1_interrupt; ++ u32 non_secure_el1_flags; ++ u32 virtual_timer_interrupt; ++ u32 virtual_timer_flags; ++ u32 non_secure_el2_interrupt; ++ u32 non_secure_el2_flags; ++ u64 counter_read_block_address; ++ u32 platform_timer_count; ++ u32 platform_timer_offset; ++} __attribute__((packed)); ++ ++struct acpi_gtdt_header { ++ u8 type; ++ u16 length; ++} __attribute__((packed)); ++ ++enum acpi_gtdt_type { ++ ACPI_GTDT_TYPE_TIMER_BLOCK = 0, ++ ACPI_GTDT_TYPE_WATCHDOG = 1, ++ ACPI_GTDT_TYPE_RESERVED = 2, ++}; ++ ++struct acpi_gtdt_timer_block { ++ struct acpi_gtdt_header header; ++ u8 reserved; ++ u64 
block_address; ++ u32 timer_count; ++ u32 timer_offset; ++} __attribute__((packed)); ++ ++struct acpi_gtdt_timer_entry { ++ u8 frame_number; ++ u8 reserved[3]; ++ u64 base_address; ++ u64 el0_base_address; ++ u32 timer_interrupt; ++ u32 timer_flags; ++ u32 virtual_timer_interrupt; ++ u32 virtual_timer_flags; ++ u32 common_flags; ++} __attribute__((packed)); ++ ++struct acpi_gtdt_watchdog { ++ struct acpi_gtdt_header header; ++ u8 reserved; ++ u64 refresh_frame_address; ++ u64 control_frame_address; ++ u32 timer_interrupt; ++ u32 timer_flags; ++} __attribute__((packed)); ++ ++struct acpi_gtdt_descriptor { ++ struct acpi_table_gtdt *gtdt; ++ void *gtdt_end; ++ void *platform_timer; ++}; ++ ++struct acpi_mpam_header { ++ u8 type; ++ u16 length; ++ u8 reserved; ++ u64 base_address; ++ u32 overflow_interrupt; ++ u32 overflow_flags; ++ u32 error_interrupt; ++ u32 error_interrupt_flags; ++ u32 not_ready_max; ++ u32 offset; ++} __attribute__((packed)); ++ ++enum AcpiMpamType { ++ ACPI_MPAM_TYPE_SMMU = 0, ++ ACPI_MPAM_TYPE_CACHE = 1, ++ ACPI_MPAM_TYPE_MEMORY = 2, ++ ACPI_MPAM_TYPE_UNKNOWN = 3, ++}; ++ ++struct acpi_mpam_node_cache { ++ struct acpi_mpam_header header; ++ u32 PPTT_ref; ++}; ++ ++struct acpi_mpam_node_memory { ++ struct acpi_mpam_header header; ++ u8 proximity_domain; ++ u8 reserved1[3]; ++}; ++ ++struct pnp_device_id { ++ __u8 id[8]; ++ kernel_ulong_t driver_data; ++}; ++ ++struct pnp_card_device_id { ++ __u8 id[8]; ++ kernel_ulong_t driver_data; ++ struct { ++ __u8 id[8]; ++ } devs[8]; ++}; ++ ++struct pnp_protocol; ++ ++struct pnp_id; ++ ++struct pnp_card { ++ struct device dev; ++ unsigned char number; ++ struct list_head global_list; ++ struct list_head protocol_list; ++ struct list_head devices; ++ struct pnp_protocol *protocol; ++ struct pnp_id *id; ++ char name[50]; ++ unsigned char pnpver; ++ unsigned char productver; ++ unsigned int serial; ++ unsigned char checksum; ++ struct proc_dir_entry *procdir; ++}; ++ ++struct pnp_dev; ++ ++struct pnp_protocol { ++ struct list_head protocol_list; ++ char *name; ++ int (*get)(struct pnp_dev *); ++ int (*set)(struct pnp_dev *); ++ int (*disable)(struct pnp_dev *); ++ bool (*can_wakeup)(struct pnp_dev *); ++ int (*suspend)(struct pnp_dev *, pm_message_t); ++ int (*resume)(struct pnp_dev *); ++ unsigned char number; ++ struct device dev; ++ struct list_head cards; ++ struct list_head devices; ++}; ++ ++struct pnp_id { ++ char id[8]; ++ struct pnp_id *next; ++}; ++ ++struct pnp_card_driver; ++ ++struct pnp_card_link { ++ struct pnp_card *card; ++ struct pnp_card_driver *driver; ++ void *driver_data; ++ pm_message_t pm_state; ++}; ++ ++struct pnp_driver { ++ char *name; ++ const struct pnp_device_id *id_table; ++ unsigned int flags; ++ int (*probe)(struct pnp_dev *, const struct pnp_device_id *); ++ void (*remove)(struct pnp_dev *); ++ void (*shutdown)(struct pnp_dev *); ++ int (*suspend)(struct pnp_dev *, pm_message_t); ++ int (*resume)(struct pnp_dev *); ++ struct device_driver driver; ++}; ++ ++struct pnp_card_driver { ++ struct list_head global_list; ++ char *name; ++ const struct pnp_card_device_id *id_table; ++ unsigned int flags; ++ int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); ++ void (*remove)(struct pnp_card_link *); ++ int (*suspend)(struct pnp_card_link *, pm_message_t); ++ int (*resume)(struct pnp_card_link *); ++ struct pnp_driver link; ++}; ++ ++struct pnp_dev { ++ struct device dev; ++ u64 dma_mask; ++ unsigned int number; ++ int status; ++ struct list_head global_list; ++ struct list_head 
protocol_list; ++ struct list_head card_list; ++ struct list_head rdev_list; ++ struct pnp_protocol *protocol; ++ struct pnp_card *card; ++ struct pnp_driver *driver; ++ struct pnp_card_link *card_link; ++ struct pnp_id *id; ++ int active; ++ int capabilities; ++ unsigned int num_dependent_sets; ++ struct list_head resources; ++ struct list_head options; ++ char name[50]; ++ int flags; ++ struct proc_dir_entry *procent; ++ void *data; ++}; ++ ++struct pnp_resource { ++ struct list_head list; ++ struct resource res; ++}; ++ ++struct pnp_port { ++ resource_size_t min; ++ resource_size_t max; ++ resource_size_t align; ++ resource_size_t size; ++ unsigned char flags; ++}; ++ ++typedef struct { ++ long unsigned int bits[4]; ++} pnp_irq_mask_t; ++ ++struct pnp_irq { ++ pnp_irq_mask_t map; ++ unsigned char flags; ++}; ++ ++struct pnp_dma { ++ unsigned char map; ++ unsigned char flags; ++}; ++ ++struct pnp_mem { ++ resource_size_t min; ++ resource_size_t max; ++ resource_size_t align; ++ resource_size_t size; ++ unsigned char flags; ++}; ++ ++struct pnp_option { ++ struct list_head list; ++ unsigned int flags; ++ long unsigned int type; ++ union { ++ struct pnp_port port; ++ struct pnp_irq irq; ++ struct pnp_dma dma; ++ struct pnp_mem mem; ++ } u; ++}; ++ ++struct pnp_info_buffer { ++ char *buffer; ++ char *curr; ++ long unsigned int size; ++ long unsigned int len; ++ int stop; ++ int error; ++}; ++ ++typedef struct pnp_info_buffer pnp_info_buffer_t; ++ ++struct pnp_fixup { ++ char id[7]; ++ void (*quirk_function)(struct pnp_dev *); ++}; ++ ++struct acpipnp_parse_option_s { ++ struct pnp_dev *dev; ++ unsigned int option_flags; ++}; ++ ++struct deferred_device { ++ struct amba_device *dev; ++ struct resource *parent; ++ struct list_head node; ++}; ++ ++struct find_data { ++ struct amba_device *dev; ++ struct device *parent; ++ const char *busid; ++ unsigned int id; ++ unsigned int mask; ++}; ++ ++struct clk_bulk_data { ++ const char *id; ++ struct clk *clk; ++}; ++ ++struct clk_bulk_devres { ++ struct clk_bulk_data *clks; ++ int num_clks; ++}; ++ ++struct clk_lookup { ++ struct list_head node; ++ const char *dev_id; ++ const char *con_id; ++ struct clk *clk; ++ struct clk_hw *clk_hw; ++}; ++ ++struct clk_lookup_alloc { ++ struct clk_lookup cl; ++ char dev_id[20]; ++ char con_id[16]; ++}; ++ ++struct clk_notifier { ++ struct clk *clk; ++ struct srcu_notifier_head notifier_head; ++ struct list_head node; ++}; ++ ++struct clk { ++ struct clk_core *core; ++ const char *dev_id; ++ const char *con_id; ++ long unsigned int min_rate; ++ long unsigned int max_rate; ++ unsigned int exclusive_count; ++ struct hlist_node clks_node; ++}; ++ ++struct clk_notifier_data { ++ struct clk *clk; ++ long unsigned int old_rate; ++ long unsigned int new_rate; ++}; ++ ++struct clk_core { ++ const char *name; ++ const struct clk_ops *ops; ++ struct clk_hw *hw; ++ struct module *owner; ++ struct device *dev; ++ struct clk_core *parent; ++ const char **parent_names; ++ struct clk_core **parents; ++ u8 num_parents; ++ u8 new_parent_index; ++ long unsigned int rate; ++ long unsigned int req_rate; ++ long unsigned int new_rate; ++ struct clk_core *new_parent; ++ struct clk_core *new_child; ++ long unsigned int flags; ++ bool orphan; ++ unsigned int enable_count; ++ unsigned int prepare_count; ++ unsigned int protect_count; ++ long unsigned int min_rate; ++ long unsigned int max_rate; ++ long unsigned int accuracy; ++ int phase; ++ struct clk_duty duty; ++ struct hlist_head children; ++ struct hlist_node child_node; ++ struct 
hlist_head clks; ++ unsigned int notifier_count; ++ struct dentry *dentry; ++ struct hlist_node debug_node; ++ struct kref ref; ++}; ++ ++struct clk_onecell_data { ++ struct clk **clks; ++ unsigned int clk_num; ++}; ++ ++struct clk_hw_onecell_data { ++ unsigned int num; ++ struct clk_hw *hws[0]; ++}; ++ ++struct trace_event_raw_clk { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_clk_rate { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ long unsigned int rate; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_clk_parent { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u32 __data_loc_pname; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_clk_phase { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ int phase; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_clk_duty_cycle { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ unsigned int num; ++ unsigned int den; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_clk { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_clk_rate { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_clk_parent { ++ u32 name; ++ u32 pname; ++}; ++ ++struct trace_event_data_offsets_clk_phase { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_clk_duty_cycle { ++ u32 name; ++}; ++ ++struct of_clk_provider { ++ struct list_head link; ++ struct device_node *node; ++ struct clk * (*get)(struct of_phandle_args *, void *); ++ struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); ++ void *data; ++}; ++ ++struct clock_provider { ++ void (*clk_init_cb)(struct device_node *); ++ struct device_node *np; ++ struct list_head node; ++}; ++ ++struct clk_div_table { ++ unsigned int val; ++ unsigned int div; ++}; ++ ++struct clk_divider { ++ struct clk_hw hw; ++ void *reg; ++ u8 shift; ++ u8 width; ++ u8 flags; ++ const struct clk_div_table *table; ++ spinlock_t *lock; ++}; ++ ++typedef void (*of_init_fn_1)(struct device_node *); ++ ++struct clk_fixed_factor { ++ struct clk_hw hw; ++ unsigned int mult; ++ unsigned int div; ++}; ++ ++struct clk_fixed_rate { ++ struct clk_hw hw; ++ long unsigned int fixed_rate; ++ long unsigned int fixed_accuracy; ++ u8 flags; ++}; ++ ++struct clk_gate { ++ struct clk_hw hw; ++ void *reg; ++ u8 bit_idx; ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++struct clk_multiplier { ++ struct clk_hw hw; ++ void *reg; ++ u8 shift; ++ u8 width; ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++struct clk_mux { ++ struct clk_hw hw; ++ void *reg; ++ u32 *table; ++ u32 mask; ++ u8 shift; ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++struct clk_composite { ++ struct clk_hw hw; ++ struct clk_ops ops; ++ struct clk_hw *mux_hw; ++ struct clk_hw *rate_hw; ++ struct clk_hw *gate_hw; ++ const struct clk_ops *mux_ops; ++ const struct clk_ops *rate_ops; ++ const struct clk_ops *gate_ops; ++}; ++ ++struct clk_fractional_divider { ++ struct clk_hw hw; ++ void *reg; ++ u8 mshift; ++ u8 mwidth; ++ u32 mmask; ++ u8 nshift; ++ u8 nwidth; ++ u32 nmask; ++ u8 flags; ++ void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); ++ spinlock_t *lock; ++}; ++ ++struct gpio_desc___2; ++ ++struct clk_gpio { ++ struct clk_hw hw; ++ struct gpio_desc___2 *gpiod; ++}; ++ ++enum xgene_pll_type { ++ PLL_TYPE_PCP = 0, ++ PLL_TYPE_SOC = 1, ++}; ++ ++struct xgene_clk_pll { ++ struct clk_hw hw; ++ void *reg; ++ spinlock_t *lock; ++ u32 pll_offset; ++ enum xgene_pll_type type; ++ int version; ++}; ++ ++struct xgene_clk_pmd { ++ 
struct clk_hw hw; ++ void *reg; ++ u8 shift; ++ u32 mask; ++ u64 denom; ++ u32 flags; ++ spinlock_t *lock; ++}; ++ ++struct xgene_dev_parameters { ++ void *csr_reg; ++ u32 reg_clk_offset; ++ u32 reg_clk_mask; ++ u32 reg_csr_offset; ++ u32 reg_csr_mask; ++ void *divider_reg; ++ u32 reg_divider_offset; ++ u32 reg_divider_shift; ++ u32 reg_divider_width; ++}; ++ ++struct xgene_clk { ++ struct clk_hw hw; ++ spinlock_t *lock; ++ struct xgene_dev_parameters param; ++}; ++ ++struct hisi_clock_data { ++ struct clk_onecell_data clk_data; ++ void *base; ++}; ++ ++struct hisi_fixed_rate_clock { ++ unsigned int id; ++ char *name; ++ const char *parent_name; ++ long unsigned int flags; ++ long unsigned int fixed_rate; ++}; ++ ++struct hisi_fixed_factor_clock { ++ unsigned int id; ++ char *name; ++ const char *parent_name; ++ long unsigned int mult; ++ long unsigned int div; ++ long unsigned int flags; ++}; ++ ++struct hisi_mux_clock { ++ unsigned int id; ++ const char *name; ++ const char * const *parent_names; ++ u8 num_parents; ++ long unsigned int flags; ++ long unsigned int offset; ++ u8 shift; ++ u8 width; ++ u8 mux_flags; ++ u32 *table; ++ const char *alias; ++}; ++ ++struct hisi_phase_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_names; ++ long unsigned int flags; ++ long unsigned int offset; ++ u8 shift; ++ u8 width; ++ u32 *phase_degrees; ++ u32 *phase_regvals; ++ u8 phase_num; ++}; ++ ++struct hisi_divider_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_name; ++ long unsigned int flags; ++ long unsigned int offset; ++ u8 shift; ++ u8 width; ++ u8 div_flags; ++ struct clk_div_table *table; ++ const char *alias; ++}; ++ ++struct hi6220_divider_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_name; ++ long unsigned int flags; ++ long unsigned int offset; ++ u8 shift; ++ u8 width; ++ u32 mask_bit; ++ const char *alias; ++}; ++ ++struct hisi_gate_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_name; ++ long unsigned int flags; ++ long unsigned int offset; ++ u8 bit_idx; ++ u8 gate_flags; ++ const char *alias; ++}; ++ ++struct clkgate_separated { ++ struct clk_hw hw; ++ void *enable; ++ u8 bit_idx; ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++struct hi6220_clk_divider { ++ struct clk_hw hw; ++ void *reg; ++ u8 shift; ++ u8 width; ++ u32 mask; ++ const struct clk_div_table *table; ++ spinlock_t *lock; ++}; ++ ++struct clk_hisi_phase { ++ struct clk_hw hw; ++ void *reg; ++ u32 *phase_degrees; ++ u32 *phase_regvals; ++ u8 phase_num; ++ u32 mask; ++ u8 shift; ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++struct hisi_crg_funcs { ++ struct hisi_clock_data * (*register_clks)(struct platform_device *); ++ void (*unregister_clks)(struct platform_device *); ++}; ++ ++struct hisi_reset_controller; ++ ++struct hisi_crg_dev { ++ struct hisi_clock_data *clk_data; ++ struct hisi_reset_controller *rstc; ++ const struct hisi_crg_funcs *funcs; ++}; ++ ++struct hi3519_crg_data { ++ struct hisi_clock_data *clk_data; ++ struct hisi_reset_controller *rstc; ++}; ++ ++struct reset_controller_dev; ++ ++struct reset_control_ops { ++ int (*reset)(struct reset_controller_dev *, long unsigned int); ++ int (*assert)(struct reset_controller_dev *, long unsigned int); ++ int (*deassert)(struct reset_controller_dev *, long unsigned int); ++ int (*status)(struct reset_controller_dev *, long unsigned int); ++}; ++ ++struct reset_controller_dev { ++ const struct reset_control_ops *ops; ++ struct module *owner; ++ struct list_head list; ++ struct 
list_head reset_control_head; ++ struct device *dev; ++ struct device_node *of_node; ++ int of_reset_n_cells; ++ int (*of_xlate)(struct reset_controller_dev *, const struct of_phandle_args *); ++ unsigned int nr_resets; ++}; ++ ++struct hisi_reset_controller___2 { ++ spinlock_t lock; ++ void *membase; ++ struct reset_controller_dev rcdev; ++}; ++ ++struct mbox_chan___2; ++ ++struct hi3660_stub_clk_chan { ++ struct mbox_client cl; ++ struct mbox_chan___2 *mbox; ++}; ++ ++struct hi3660_stub_clk { ++ unsigned int id; ++ struct clk_hw hw; ++ unsigned int cmd; ++ unsigned int msg[8]; ++ unsigned int rate; ++}; ++ ++struct clk_sp810; ++ ++struct clk_sp810_timerclken { ++ struct clk_hw hw; ++ struct clk *clk; ++ struct clk_sp810 *sp810; ++ int channel; ++}; ++ ++struct clk_sp810 { ++ struct device_node *node; ++ void *base; ++ spinlock_t lock; ++ struct clk_sp810_timerclken timerclken[4]; ++}; ++ ++struct vexpress_osc { ++ struct regmap *reg; ++ struct clk_hw hw; ++ long unsigned int rate_min; ++ long unsigned int rate_max; ++}; ++ ++typedef s32 dma_cookie_t; ++ ++enum dma_status { ++ DMA_COMPLETE = 0, ++ DMA_IN_PROGRESS = 1, ++ DMA_PAUSED = 2, ++ DMA_ERROR = 3, ++}; ++ ++enum dma_transaction_type { ++ DMA_MEMCPY = 0, ++ DMA_XOR = 1, ++ DMA_PQ = 2, ++ DMA_XOR_VAL = 3, ++ DMA_PQ_VAL = 4, ++ DMA_MEMSET = 5, ++ DMA_MEMSET_SG = 6, ++ DMA_INTERRUPT = 7, ++ DMA_PRIVATE = 8, ++ DMA_ASYNC_TX = 9, ++ DMA_SLAVE = 10, ++ DMA_CYCLIC = 11, ++ DMA_INTERLEAVE = 12, ++ DMA_TX_TYPE_END = 13, ++}; ++ ++enum dma_transfer_direction { ++ DMA_MEM_TO_MEM = 0, ++ DMA_MEM_TO_DEV = 1, ++ DMA_DEV_TO_MEM = 2, ++ DMA_DEV_TO_DEV = 3, ++ DMA_TRANS_NONE = 4, ++}; ++ ++struct data_chunk { ++ size_t size; ++ size_t icg; ++ size_t dst_icg; ++ size_t src_icg; ++}; ++ ++struct dma_interleaved_template { ++ dma_addr_t src_start; ++ dma_addr_t dst_start; ++ enum dma_transfer_direction dir; ++ bool src_inc; ++ bool dst_inc; ++ bool src_sgl; ++ bool dst_sgl; ++ size_t numf; ++ size_t frame_size; ++ struct data_chunk sgl[0]; ++}; ++ ++enum dma_ctrl_flags { ++ DMA_PREP_INTERRUPT = 1, ++ DMA_CTRL_ACK = 2, ++ DMA_PREP_PQ_DISABLE_P = 4, ++ DMA_PREP_PQ_DISABLE_Q = 8, ++ DMA_PREP_CONTINUE = 16, ++ DMA_PREP_FENCE = 32, ++ DMA_CTRL_REUSE = 64, ++ DMA_PREP_CMD = 128, ++}; ++ ++enum sum_check_bits { ++ SUM_CHECK_P = 0, ++ SUM_CHECK_Q = 1, ++}; ++ ++enum sum_check_flags { ++ SUM_CHECK_P_RESULT = 1, ++ SUM_CHECK_Q_RESULT = 2, ++}; ++ ++typedef struct { ++ long unsigned int bits[1]; ++} dma_cap_mask_t; ++ ++struct dma_chan_percpu { ++ long unsigned int memcpy_count; ++ long unsigned int bytes_transferred; ++}; ++ ++struct dma_router { ++ struct device *dev; ++ void (*route_free)(struct device *, void *); ++}; ++ ++struct dma_device; ++ ++struct dma_chan_dev; ++ ++struct dma_chan { ++ struct dma_device *device; ++ dma_cookie_t cookie; ++ dma_cookie_t completed_cookie; ++ int chan_id; ++ struct dma_chan_dev *dev; ++ struct list_head device_node; ++ struct dma_chan_percpu *local; ++ int client_count; ++ int table_count; ++ struct dma_router *router; ++ void *route_data; ++ void *private; ++}; ++ ++typedef bool (*dma_filter_fn)(struct dma_chan *, void *); ++ ++struct dma_slave_map; ++ ++struct dma_filter { ++ dma_filter_fn fn; ++ int mapcnt; ++ const struct dma_slave_map *map; ++}; ++ ++enum dmaengine_alignment { ++ DMAENGINE_ALIGN_1_BYTE = 0, ++ DMAENGINE_ALIGN_2_BYTES = 1, ++ DMAENGINE_ALIGN_4_BYTES = 2, ++ DMAENGINE_ALIGN_8_BYTES = 3, ++ DMAENGINE_ALIGN_16_BYTES = 4, ++ DMAENGINE_ALIGN_32_BYTES = 5, ++ DMAENGINE_ALIGN_64_BYTES = 6, ++}; ++ ++enum 
dma_residue_granularity { ++ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, ++ DMA_RESIDUE_GRANULARITY_SEGMENT = 1, ++ DMA_RESIDUE_GRANULARITY_BURST = 2, ++}; ++ ++struct dma_async_tx_descriptor; ++ ++struct dma_slave_config; ++ ++struct dma_tx_state; ++ ++struct dma_device { ++ unsigned int chancnt; ++ unsigned int privatecnt; ++ struct list_head channels; ++ struct list_head global_node; ++ struct dma_filter filter; ++ dma_cap_mask_t cap_mask; ++ short unsigned int max_xor; ++ short unsigned int max_pq; ++ enum dmaengine_alignment copy_align; ++ enum dmaengine_alignment xor_align; ++ enum dmaengine_alignment pq_align; ++ enum dmaengine_alignment fill_align; ++ int dev_id; ++ struct device *dev; ++ u32 src_addr_widths; ++ u32 dst_addr_widths; ++ u32 directions; ++ u32 max_burst; ++ bool descriptor_reuse; ++ enum dma_residue_granularity residue_granularity; ++ int (*device_alloc_chan_resources)(struct dma_chan *); ++ void (*device_free_chan_resources)(struct dma_chan *); ++ struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); ++ struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); ++ struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); ++ int (*device_config)(struct dma_chan *, struct dma_slave_config *); ++ int (*device_pause)(struct dma_chan *); ++ int (*device_resume)(struct dma_chan *); ++ int (*device_terminate_all)(struct dma_chan *); ++ void (*device_synchronize)(struct dma_chan *); ++ enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); ++ void (*device_issue_pending)(struct dma_chan *); ++}; ++ ++struct dma_chan_dev { ++ struct dma_chan *chan; ++ struct device device; ++ int dev_id; ++ atomic_t *idr_ref; ++}; ++ ++enum dma_slave_buswidth { ++ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, ++ DMA_SLAVE_BUSWIDTH_1_BYTE = 1, ++ DMA_SLAVE_BUSWIDTH_2_BYTES = 2, ++ DMA_SLAVE_BUSWIDTH_3_BYTES = 3, ++ DMA_SLAVE_BUSWIDTH_4_BYTES = 4, ++ DMA_SLAVE_BUSWIDTH_8_BYTES = 8, ++ 
DMA_SLAVE_BUSWIDTH_16_BYTES = 16, ++ DMA_SLAVE_BUSWIDTH_32_BYTES = 32, ++ DMA_SLAVE_BUSWIDTH_64_BYTES = 64, ++}; ++ ++struct dma_slave_config { ++ enum dma_transfer_direction direction; ++ phys_addr_t src_addr; ++ phys_addr_t dst_addr; ++ enum dma_slave_buswidth src_addr_width; ++ enum dma_slave_buswidth dst_addr_width; ++ u32 src_maxburst; ++ u32 dst_maxburst; ++ u32 src_port_window_size; ++ u32 dst_port_window_size; ++ bool device_fc; ++ unsigned int slave_id; ++}; ++ ++struct dma_slave_caps { ++ u32 src_addr_widths; ++ u32 dst_addr_widths; ++ u32 directions; ++ u32 max_burst; ++ bool cmd_pause; ++ bool cmd_resume; ++ bool cmd_terminate; ++ enum dma_residue_granularity residue_granularity; ++ bool descriptor_reuse; ++}; ++ ++typedef void (*dma_async_tx_callback)(void *); ++ ++enum dmaengine_tx_result { ++ DMA_TRANS_NOERROR = 0, ++ DMA_TRANS_READ_FAILED = 1, ++ DMA_TRANS_WRITE_FAILED = 2, ++ DMA_TRANS_ABORTED = 3, ++}; ++ ++struct dmaengine_result { ++ enum dmaengine_tx_result result; ++ u32 residue; ++}; ++ ++typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); ++ ++struct dmaengine_unmap_data { ++ u8 map_cnt; ++ u8 to_cnt; ++ u8 from_cnt; ++ u8 bidi_cnt; ++ struct device *dev; ++ struct kref kref; ++ size_t len; ++ dma_addr_t addr[0]; ++}; ++ ++struct dma_async_tx_descriptor { ++ dma_cookie_t cookie; ++ enum dma_ctrl_flags flags; ++ dma_addr_t phys; ++ struct dma_chan *chan; ++ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); ++ int (*desc_free)(struct dma_async_tx_descriptor *); ++ dma_async_tx_callback callback; ++ dma_async_tx_callback_result callback_result; ++ void *callback_param; ++ struct dmaengine_unmap_data *unmap; ++}; ++ ++struct dma_tx_state { ++ dma_cookie_t last; ++ dma_cookie_t used; ++ u32 residue; ++}; ++ ++struct dma_slave_map { ++ const char *devname; ++ const char *slave; ++ void *param; ++}; ++ ++struct dma_chan_tbl_ent { ++ struct dma_chan *chan; ++}; ++ ++struct dmaengine_unmap_pool { ++ struct kmem_cache *cache; ++ const char *name; ++ mempool_t *pool; ++ size_t size; ++}; ++ ++struct acpi_table_csrt { ++ struct acpi_table_header header; ++}; ++ ++struct acpi_csrt_group { ++ u32 length; ++ u32 vendor_id; ++ u32 subvendor_id; ++ u16 device_id; ++ u16 subdevice_id; ++ u16 revision; ++ u16 reserved; ++ u32 shared_info_length; ++}; ++ ++struct acpi_csrt_shared_info { ++ u16 major_version; ++ u16 minor_version; ++ u32 mmio_base_low; ++ u32 mmio_base_high; ++ u32 gsi_interrupt; ++ u8 interrupt_polarity; ++ u8 interrupt_mode; ++ u8 num_channels; ++ u8 dma_address_width; ++ u16 base_request_line; ++ u16 num_handshake_signals; ++ u32 max_block_size; ++}; ++ ++struct acpi_dma_spec { ++ int chan_id; ++ int slave_id; ++ struct device *dev; ++}; ++ ++struct acpi_dma { ++ struct list_head dma_controllers; ++ struct device *dev; ++ struct dma_chan * (*acpi_dma_xlate)(struct acpi_dma_spec *, struct acpi_dma *); ++ void *data; ++ short unsigned int base_request_line; ++ short unsigned int end_request_line; ++}; ++ ++struct acpi_dma_filter_info { ++ dma_cap_mask_t dma_cap; ++ dma_filter_fn filter_fn; ++}; ++ ++struct acpi_dma_parser_data { ++ struct acpi_dma_spec dma_spec; ++ size_t index; ++ size_t n; ++}; ++ ++struct of_dma { ++ struct list_head of_dma_controllers; ++ struct device_node *of_node; ++ struct dma_chan * (*of_dma_xlate)(struct of_phandle_args *, struct of_dma *); ++ void * (*of_dma_route_allocate)(struct of_phandle_args *, struct of_dma *); ++ struct dma_router *dma_router; ++ void *of_dma_data; ++}; ++ ++struct 
of_dma_filter_info { ++ dma_cap_mask_t dma_cap; ++ dma_filter_fn filter_fn; ++}; ++ ++struct reset_control_lookup { ++ struct list_head list; ++ const char *provider; ++ unsigned int index; ++ const char *dev_id; ++ const char *con_id; ++}; ++ ++struct reset_control___2 { ++ struct reset_controller_dev *rcdev; ++ struct list_head list; ++ unsigned int id; ++ struct kref refcnt; ++ bool shared; ++ bool array; ++ atomic_t deassert_count; ++ atomic_t triggered_count; ++}; ++ ++struct reset_control_array { ++ struct reset_control___2 base; ++ unsigned int num_rstcs; ++ struct reset_control___2 *rstc[0]; ++}; ++ ++enum hi6220_reset_ctrl_type { ++ PERIPHERAL = 0, ++ MEDIA = 1, ++}; ++ ++struct hi6220_reset_data { ++ struct reset_controller_dev rc_dev; ++ struct regmap *regmap; ++}; ++ ++struct hi3660_reset_controller { ++ struct reset_controller_dev rst; ++ struct regmap *map; ++}; ++ ++struct n_tty_data { ++ size_t read_head; ++ size_t commit_head; ++ size_t canon_head; ++ size_t echo_head; ++ size_t echo_commit; ++ size_t echo_mark; ++ long unsigned int char_map[4]; ++ long unsigned int overrun_time; ++ int num_overrun; ++ bool no_room; ++ unsigned char lnext: 1; ++ unsigned char erasing: 1; ++ unsigned char raw: 1; ++ unsigned char real_raw: 1; ++ unsigned char icanon: 1; ++ unsigned char push: 1; ++ char read_buf[4096]; ++ long unsigned int read_flags[64]; ++ unsigned char echo_buf[4096]; ++ size_t read_tail; ++ size_t line_start; ++ unsigned int column; ++ unsigned int canon_column; ++ size_t echo_tail; ++ struct mutex atomic_read_lock; ++ struct mutex output_lock; ++}; ++ ++enum { ++ ERASE = 0, ++ WERASE = 1, ++ KILL = 2, ++}; ++ ++struct termios { ++ tcflag_t c_iflag; ++ tcflag_t c_oflag; ++ tcflag_t c_cflag; ++ tcflag_t c_lflag; ++ cc_t c_line; ++ cc_t c_cc[19]; ++}; ++ ++struct termios2 { ++ tcflag_t c_iflag; ++ tcflag_t c_oflag; ++ tcflag_t c_cflag; ++ tcflag_t c_lflag; ++ cc_t c_line; ++ cc_t c_cc[19]; ++ speed_t c_ispeed; ++ speed_t c_ospeed; ++}; ++ ++struct termio { ++ short unsigned int c_iflag; ++ short unsigned int c_oflag; ++ short unsigned int c_cflag; ++ short unsigned int c_lflag; ++ unsigned char c_line; ++ unsigned char c_cc[8]; ++}; ++ ++struct ldsem_waiter { ++ struct list_head list; ++ struct task_struct *task; ++}; ++ ++struct pts_fs_info___2; ++ ++struct tty_audit_buf { ++ struct mutex mutex; ++ dev_t dev; ++ unsigned int icanon: 1; ++ size_t valid; ++ unsigned char *data; ++}; ++ ++struct sysrq_state { ++ struct input_handle handle; ++ struct work_struct reinject_work; ++ long unsigned int key_down[12]; ++ unsigned int alt; ++ unsigned int alt_use; ++ bool active; ++ bool need_reinject; ++ bool reinjecting; ++ bool reset_canceled; ++ bool reset_requested; ++ long unsigned int reset_keybit[12]; ++ int reset_seq_len; ++ int reset_seq_cnt; ++ int reset_seq_version; ++ struct timer_list keyreset_timer; ++}; ++ ++struct consolefontdesc { ++ short unsigned int charcount; ++ short unsigned int charheight; ++ char *chardata; ++}; ++ ++struct unipair { ++ short unsigned int unicode; ++ short unsigned int fontpos; ++}; ++ ++struct unimapdesc { ++ short unsigned int entry_ct; ++ struct unipair *entries; ++}; ++ ++struct kbdiacruc { ++ unsigned int diacr; ++ unsigned int base; ++ unsigned int result; ++}; ++ ++struct kbd_repeat { ++ int delay; ++ int period; ++}; ++ ++struct console_font_op { ++ unsigned int op; ++ unsigned int flags; ++ unsigned int width; ++ unsigned int height; ++ unsigned int charcount; ++ unsigned char *data; ++}; ++ ++struct vt_stat { ++ short unsigned int 
v_active; ++ short unsigned int v_signal; ++ short unsigned int v_state; ++}; ++ ++struct vt_sizes { ++ short unsigned int v_rows; ++ short unsigned int v_cols; ++ short unsigned int v_scrollsize; ++}; ++ ++struct vt_consize { ++ short unsigned int v_rows; ++ short unsigned int v_cols; ++ short unsigned int v_vlin; ++ short unsigned int v_clin; ++ short unsigned int v_vcol; ++ short unsigned int v_ccol; ++}; ++ ++struct vt_event { ++ unsigned int event; ++ unsigned int oldev; ++ unsigned int newev; ++ unsigned int pad[4]; ++}; ++ ++struct vt_setactivate { ++ unsigned int console; ++ struct vt_mode mode; ++}; ++ ++struct vt_event_wait { ++ struct list_head list; ++ struct vt_event event; ++ int done; ++}; ++ ++struct compat_consolefontdesc { ++ short unsigned int charcount; ++ short unsigned int charheight; ++ compat_caddr_t chardata; ++}; ++ ++struct compat_console_font_op { ++ compat_uint_t op; ++ compat_uint_t flags; ++ compat_uint_t width; ++ compat_uint_t height; ++ compat_uint_t charcount; ++ compat_caddr_t data; ++}; ++ ++struct compat_unimapdesc { ++ short unsigned int entry_ct; ++ compat_caddr_t entries; ++}; ++ ++struct vt_notifier_param { ++ struct vc_data *vc; ++ unsigned int c; ++}; ++ ++struct vcs_poll_data { ++ struct notifier_block notifier; ++ unsigned int cons_num; ++ bool seen_last_update; ++ wait_queue_head_t waitq; ++ struct fasync_struct *fasync; ++}; ++ ++struct tiocl_selection { ++ short unsigned int xs; ++ short unsigned int ys; ++ short unsigned int xe; ++ short unsigned int ye; ++ short unsigned int sel_mode; ++}; ++ ++enum led_brightness { ++ LED_OFF = 0, ++ LED_ON = 1, ++ LED_HALF = 127, ++ LED_FULL = 255, ++}; ++ ++struct led_trigger; ++ ++struct led_classdev { ++ const char *name; ++ enum led_brightness brightness; ++ enum led_brightness max_brightness; ++ int flags; ++ long unsigned int work_flags; ++ void (*brightness_set)(struct led_classdev *, enum led_brightness); ++ int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness); ++ enum led_brightness (*brightness_get)(struct led_classdev *); ++ int (*blink_set)(struct led_classdev *, long unsigned int *, long unsigned int *); ++ struct device *dev; ++ const struct attribute_group **groups; ++ struct list_head node; ++ const char *default_trigger; ++ long unsigned int blink_delay_on; ++ long unsigned int blink_delay_off; ++ struct timer_list blink_timer; ++ int blink_brightness; ++ int new_blink_brightness; ++ void (*flash_resume)(struct led_classdev *); ++ struct work_struct set_brightness_work; ++ int delayed_set_value; ++ struct rw_semaphore trigger_lock; ++ struct led_trigger *trigger; ++ struct list_head trig_list; ++ void *trigger_data; ++ bool activated; ++ struct mutex led_access; ++}; ++ ++struct led_trigger { ++ const char *name; ++ int (*activate)(struct led_classdev *); ++ void (*deactivate)(struct led_classdev *); ++ rwlock_t leddev_list_lock; ++ struct list_head led_cdevs; ++ struct list_head next_trig; ++ const struct attribute_group **groups; ++}; ++ ++struct keyboard_notifier_param { ++ struct vc_data *vc; ++ int down; ++ int shift; ++ int ledstate; ++ unsigned int value; ++}; ++ ++struct kbd_struct { ++ unsigned char lockstate; ++ unsigned char slockstate; ++ unsigned char ledmode: 1; ++ unsigned char ledflagstate: 4; ++ char: 3; ++ unsigned char default_ledflagstate: 4; ++ unsigned char kbdmode: 3; ++ char: 1; ++ unsigned char modeflags: 5; ++}; ++ ++struct kbentry { ++ unsigned char kb_table; ++ unsigned char kb_index; ++ short unsigned int kb_value; ++}; ++ ++struct 
kbsentry { ++ unsigned char kb_func; ++ unsigned char kb_string[512]; ++}; ++ ++struct kbdiacr { ++ unsigned char diacr; ++ unsigned char base; ++ unsigned char result; ++}; ++ ++struct kbdiacrs { ++ unsigned int kb_cnt; ++ struct kbdiacr kbdiacr[256]; ++}; ++ ++struct kbdiacrsuc { ++ unsigned int kb_cnt; ++ struct kbdiacruc kbdiacruc[256]; ++}; ++ ++struct kbkeycode { ++ unsigned int scancode; ++ unsigned int keycode; ++}; ++ ++typedef void k_handler_fn(struct vc_data *, unsigned char, char); ++ ++typedef void fn_handler_fn(struct vc_data *); ++ ++struct getset_keycode_data { ++ struct input_keymap_entry ke; ++ int error; ++}; ++ ++struct kbd_led_trigger { ++ struct led_trigger trigger; ++ unsigned int mask; ++}; ++ ++struct uni_pagedir { ++ u16 **uni_pgdir[32]; ++ long unsigned int refcount; ++ long unsigned int sum; ++ unsigned char *inverse_translations[4]; ++ u16 *inverse_trans_unicode; ++}; ++ ++typedef uint32_t char32_t; ++ ++struct uni_screen { ++ char32_t *lines[0]; ++}; ++ ++struct con_driver { ++ const struct consw *con; ++ const char *desc; ++ struct device *dev; ++ int node; ++ int first; ++ int last; ++ int flag; ++}; ++ ++enum { ++ blank_off = 0, ++ blank_normal_wait = 1, ++ blank_vesa_wait = 2, ++}; ++ ++struct rgb { ++ u8 r; ++ u8 g; ++ u8 b; ++}; ++ ++enum { ++ ESnormal = 0, ++ ESesc = 1, ++ ESsquare = 2, ++ ESgetpars = 3, ++ ESfunckey = 4, ++ EShash = 5, ++ ESsetG0 = 6, ++ ESsetG1 = 7, ++ ESpercent = 8, ++ ESignore = 9, ++ ESnonstd = 10, ++ ESpalette = 11, ++ ESosc = 12, ++}; ++ ++struct interval { ++ uint32_t first; ++ uint32_t last; ++}; ++ ++struct hv_ops; ++ ++struct hvc_struct { ++ struct tty_port port; ++ spinlock_t lock; ++ int index; ++ int do_wakeup; ++ char *outbuf; ++ int outbuf_size; ++ int n_outbuf; ++ uint32_t vtermno; ++ const struct hv_ops *ops; ++ int irq_requested; ++ int data; ++ struct winsize ws; ++ struct work_struct tty_resize; ++ struct list_head next; ++ long unsigned int flags; ++}; ++ ++struct hv_ops { ++ int (*get_chars)(uint32_t, char *, int); ++ int (*put_chars)(uint32_t, const char *, int); ++ int (*flush)(uint32_t, bool); ++ int (*notifier_add)(struct hvc_struct *, int); ++ void (*notifier_del)(struct hvc_struct *, int); ++ void (*notifier_hangup)(struct hvc_struct *, int); ++ int (*tiocmget)(struct hvc_struct *); ++ int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int); ++ void (*dtr_rts)(struct hvc_struct *, int); ++}; ++ ++struct uart_driver { ++ struct module *owner; ++ const char *driver_name; ++ const char *dev_name; ++ int major; ++ int minor; ++ int nr; ++ struct console *cons; ++ struct uart_state *state; ++ struct tty_driver *tty_driver; ++}; ++ ++struct uart_match { ++ struct uart_port *port; ++ struct uart_driver *driver; ++}; ++ ++enum hwparam_type { ++ hwparam_ioport = 0, ++ hwparam_iomem = 1, ++ hwparam_ioport_or_iomem = 2, ++ hwparam_irq = 3, ++ hwparam_dma = 4, ++ hwparam_dma_addr = 5, ++ hwparam_other = 6, ++}; ++ ++enum { ++ PLAT8250_DEV_LEGACY = 4294967295, ++ PLAT8250_DEV_PLATFORM = 0, ++ PLAT8250_DEV_PLATFORM1 = 1, ++ PLAT8250_DEV_PLATFORM2 = 2, ++ PLAT8250_DEV_FOURPORT = 3, ++ PLAT8250_DEV_ACCENT = 4, ++ PLAT8250_DEV_BOCA = 5, ++ PLAT8250_DEV_EXAR_ST16C554 = 6, ++ PLAT8250_DEV_HUB6 = 7, ++ PLAT8250_DEV_AU1X00 = 8, ++ PLAT8250_DEV_SM501 = 9, ++}; ++ ++struct uart_8250_port; ++ ++struct uart_8250_ops { ++ int (*setup_irq)(struct uart_8250_port *); ++ void (*release_irq)(struct uart_8250_port *); ++}; ++ ++struct uart_8250_dma; ++ ++struct uart_8250_em485; ++ ++struct uart_8250_port { ++ struct uart_port 
port; ++ struct timer_list timer; ++ struct list_head list; ++ u32 capabilities; ++ short unsigned int bugs; ++ bool fifo_bug; ++ unsigned int tx_loadsz; ++ unsigned char acr; ++ unsigned char fcr; ++ unsigned char ier; ++ unsigned char lcr; ++ unsigned char mcr; ++ unsigned char mcr_mask; ++ unsigned char mcr_force; ++ unsigned char cur_iotype; ++ unsigned int rpm_tx_active; ++ unsigned char canary; ++ unsigned char probe; ++ unsigned char lsr_saved_flags; ++ unsigned char msr_saved_flags; ++ struct uart_8250_dma *dma; ++ const struct uart_8250_ops *ops; ++ int (*dl_read)(struct uart_8250_port *); ++ void (*dl_write)(struct uart_8250_port *, int); ++ struct uart_8250_em485 *em485; ++ struct delayed_work overrun_backoff; ++ u32 overrun_backoff_time_ms; ++}; ++ ++struct uart_8250_em485 { ++ struct hrtimer start_tx_timer; ++ struct hrtimer stop_tx_timer; ++ struct hrtimer *active_timer; ++ struct uart_8250_port *port; ++}; ++ ++struct uart_8250_dma { ++ int (*tx_dma)(struct uart_8250_port *); ++ int (*rx_dma)(struct uart_8250_port *); ++ dma_filter_fn fn; ++ void *rx_param; ++ void *tx_param; ++ struct dma_slave_config rxconf; ++ struct dma_slave_config txconf; ++ struct dma_chan *rxchan; ++ struct dma_chan *txchan; ++ phys_addr_t rx_dma_addr; ++ phys_addr_t tx_dma_addr; ++ dma_addr_t rx_addr; ++ dma_addr_t tx_addr; ++ dma_cookie_t rx_cookie; ++ dma_cookie_t tx_cookie; ++ void *rx_buf; ++ size_t rx_size; ++ size_t tx_size; ++ unsigned char tx_running; ++ unsigned char tx_err; ++ unsigned char rx_running; ++}; ++ ++struct old_serial_port { ++ unsigned int uart; ++ unsigned int baud_base; ++ unsigned int port; ++ unsigned int irq; ++ upf_t flags; ++ unsigned char io_type; ++ unsigned char *iomem_base; ++ short unsigned int iomem_reg_shift; ++}; ++ ++struct irq_info { ++ struct hlist_node node; ++ int irq; ++ spinlock_t lock; ++ struct list_head *head; ++}; ++ ++struct serial8250_config { ++ const char *name; ++ short unsigned int fifo_size; ++ short unsigned int tx_loadsz; ++ unsigned char fcr; ++ unsigned char rxtrig_bytes[4]; ++ unsigned int flags; ++}; ++ ++struct pciserial_board { ++ unsigned int flags; ++ unsigned int num_ports; ++ unsigned int base_baud; ++ unsigned int uart_offset; ++ unsigned int reg_shift; ++ unsigned int first_offset; ++}; ++ ++struct serial_private; ++ ++struct pci_serial_quirk { ++ u32 vendor; ++ u32 device; ++ u32 subvendor; ++ u32 subdevice; ++ int (*probe)(struct pci_dev *); ++ int (*init)(struct pci_dev *); ++ int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); ++ void (*exit)(struct pci_dev *); ++}; ++ ++struct serial_private { ++ struct pci_dev *dev; ++ unsigned int nr; ++ struct pci_serial_quirk *quirk; ++ const struct pciserial_board *board; ++ int line[0]; ++}; ++ ++struct timedia_struct { ++ int num; ++ const short unsigned int *ids; ++}; ++ ++struct quatech_feature { ++ u16 devid; ++ bool amcc; ++}; ++ ++enum pci_board_num_t { ++ pbn_default = 0, ++ pbn_b0_1_115200 = 1, ++ pbn_b0_2_115200 = 2, ++ pbn_b0_4_115200 = 3, ++ pbn_b0_5_115200 = 4, ++ pbn_b0_8_115200 = 5, ++ pbn_b0_1_921600 = 6, ++ pbn_b0_2_921600 = 7, ++ pbn_b0_4_921600 = 8, ++ pbn_b0_2_1130000 = 9, ++ pbn_b0_4_1152000 = 10, ++ pbn_b0_4_1250000 = 11, ++ pbn_b0_2_1843200 = 12, ++ pbn_b0_4_1843200 = 13, ++ pbn_b0_1_4000000 = 14, ++ pbn_b0_bt_1_115200 = 15, ++ pbn_b0_bt_2_115200 = 16, ++ pbn_b0_bt_4_115200 = 17, ++ pbn_b0_bt_8_115200 = 18, ++ pbn_b0_bt_1_460800 = 19, ++ pbn_b0_bt_2_460800 = 20, ++ pbn_b0_bt_4_460800 = 21, ++ pbn_b0_bt_1_921600 = 22, 
++ pbn_b0_bt_2_921600 = 23, ++ pbn_b0_bt_4_921600 = 24, ++ pbn_b0_bt_8_921600 = 25, ++ pbn_b1_1_115200 = 26, ++ pbn_b1_2_115200 = 27, ++ pbn_b1_4_115200 = 28, ++ pbn_b1_8_115200 = 29, ++ pbn_b1_16_115200 = 30, ++ pbn_b1_1_921600 = 31, ++ pbn_b1_2_921600 = 32, ++ pbn_b1_4_921600 = 33, ++ pbn_b1_8_921600 = 34, ++ pbn_b1_2_1250000 = 35, ++ pbn_b1_bt_1_115200 = 36, ++ pbn_b1_bt_2_115200 = 37, ++ pbn_b1_bt_4_115200 = 38, ++ pbn_b1_bt_2_921600 = 39, ++ pbn_b1_1_1382400 = 40, ++ pbn_b1_2_1382400 = 41, ++ pbn_b1_4_1382400 = 42, ++ pbn_b1_8_1382400 = 43, ++ pbn_b2_1_115200 = 44, ++ pbn_b2_2_115200 = 45, ++ pbn_b2_4_115200 = 46, ++ pbn_b2_8_115200 = 47, ++ pbn_b2_1_460800 = 48, ++ pbn_b2_4_460800 = 49, ++ pbn_b2_8_460800 = 50, ++ pbn_b2_16_460800 = 51, ++ pbn_b2_1_921600 = 52, ++ pbn_b2_4_921600 = 53, ++ pbn_b2_8_921600 = 54, ++ pbn_b2_8_1152000 = 55, ++ pbn_b2_bt_1_115200 = 56, ++ pbn_b2_bt_2_115200 = 57, ++ pbn_b2_bt_4_115200 = 58, ++ pbn_b2_bt_2_921600 = 59, ++ pbn_b2_bt_4_921600 = 60, ++ pbn_b3_2_115200 = 61, ++ pbn_b3_4_115200 = 62, ++ pbn_b3_8_115200 = 63, ++ pbn_b4_bt_2_921600 = 64, ++ pbn_b4_bt_4_921600 = 65, ++ pbn_b4_bt_8_921600 = 66, ++ pbn_panacom = 67, ++ pbn_panacom2 = 68, ++ pbn_panacom4 = 69, ++ pbn_plx_romulus = 70, ++ pbn_endrun_2_4000000 = 71, ++ pbn_oxsemi = 72, ++ pbn_oxsemi_1_4000000 = 73, ++ pbn_oxsemi_2_4000000 = 74, ++ pbn_oxsemi_4_4000000 = 75, ++ pbn_oxsemi_8_4000000 = 76, ++ pbn_intel_i960 = 77, ++ pbn_sgi_ioc3 = 78, ++ pbn_computone_4 = 79, ++ pbn_computone_6 = 80, ++ pbn_computone_8 = 81, ++ pbn_sbsxrsio = 82, ++ pbn_pasemi_1682M = 83, ++ pbn_ni8430_2 = 84, ++ pbn_ni8430_4 = 85, ++ pbn_ni8430_8 = 86, ++ pbn_ni8430_16 = 87, ++ pbn_ADDIDATA_PCIe_1_3906250 = 88, ++ pbn_ADDIDATA_PCIe_2_3906250 = 89, ++ pbn_ADDIDATA_PCIe_4_3906250 = 90, ++ pbn_ADDIDATA_PCIe_8_3906250 = 91, ++ pbn_ce4100_1_115200 = 92, ++ pbn_omegapci = 93, ++ pbn_NETMOS9900_2s_115200 = 94, ++ pbn_brcm_trumanage = 95, ++ pbn_fintek_4 = 96, ++ pbn_fintek_8 = 97, ++ pbn_fintek_12 = 98, ++ pbn_wch382_2 = 99, ++ pbn_wch384_4 = 100, ++ pbn_pericom_PI7C9X7951 = 101, ++ pbn_pericom_PI7C9X7952 = 102, ++ pbn_pericom_PI7C9X7954 = 103, ++ pbn_pericom_PI7C9X7958 = 104, ++}; ++ ++struct exar8250_platform { ++ int (*rs485_config)(struct uart_port *, struct serial_rs485 *); ++ int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); ++}; ++ ++struct exar8250; ++ ++struct exar8250_board { ++ unsigned int num_ports; ++ unsigned int reg_shift; ++ int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int); ++ void (*exit)(struct pci_dev *); ++}; ++ ++struct exar8250 { ++ unsigned int nr; ++ struct exar8250_board *board; ++ void *virt; ++ int line[0]; ++}; ++ ++struct dw8250_data { ++ u8 usr_reg; ++ u8 dlf_size; ++ int line; ++ int msr_mask_on; ++ int msr_mask_off; ++ struct clk *clk; ++ struct clk *pclk; ++ struct reset_control *rst; ++ struct uart_8250_dma dma; ++ unsigned int skip_autocfg: 1; ++ unsigned int uart_16550_compatible: 1; ++}; ++ ++struct of_serial_info { ++ struct clk *clk; ++ struct reset_control *rst; ++ int type; ++ int line; ++}; ++ ++enum amba_vendor { ++ AMBA_VENDOR_ARM = 65, ++ AMBA_VENDOR_ST = 128, ++ AMBA_VENDOR_QCOM = 81, ++ AMBA_VENDOR_LSI = 182, ++ AMBA_VENDOR_LINUX = 254, ++}; ++ ++struct amba_pl011_data { ++ bool (*dma_filter)(struct dma_chan *, void *); ++ void *dma_rx_param; ++ void *dma_tx_param; ++ bool dma_rx_poll_enable; ++ unsigned int dma_rx_poll_rate; ++ unsigned int dma_rx_poll_timeout; ++ void (*init)(); ++ void (*exit)(); ++}; ++ ++enum { ++ REG_DR = 0, ++ 
REG_ST_DMAWM = 1, ++ REG_ST_TIMEOUT = 2, ++ REG_FR = 3, ++ REG_LCRH_RX = 4, ++ REG_LCRH_TX = 5, ++ REG_IBRD = 6, ++ REG_FBRD = 7, ++ REG_CR = 8, ++ REG_IFLS = 9, ++ REG_IMSC = 10, ++ REG_RIS = 11, ++ REG_MIS = 12, ++ REG_ICR = 13, ++ REG_DMACR = 14, ++ REG_ST_XFCR = 15, ++ REG_ST_XON1 = 16, ++ REG_ST_XON2 = 17, ++ REG_ST_XOFF1 = 18, ++ REG_ST_XOFF2 = 19, ++ REG_ST_ITCR = 20, ++ REG_ST_ITIP = 21, ++ REG_ST_ABCR = 22, ++ REG_ST_ABIMSC = 23, ++ REG_ARRAY_SIZE = 24, ++}; ++ ++struct vendor_data { ++ const u16 *reg_offset; ++ unsigned int ifls; ++ unsigned int fr_busy; ++ unsigned int fr_dsr; ++ unsigned int fr_cts; ++ unsigned int fr_ri; ++ unsigned int inv_fr; ++ bool access_32b; ++ bool oversampling; ++ bool dma_threshold; ++ bool cts_event_workaround; ++ bool always_enabled; ++ bool fixed_options; ++ unsigned int (*get_fifosize)(struct amba_device *); ++}; ++ ++struct pl011_sgbuf { ++ struct scatterlist sg; ++ char *buf; ++}; ++ ++struct pl011_dmarx_data { ++ struct dma_chan *chan; ++ struct completion complete; ++ bool use_buf_b; ++ struct pl011_sgbuf sgbuf_a; ++ struct pl011_sgbuf sgbuf_b; ++ dma_cookie_t cookie; ++ bool running; ++ struct timer_list timer; ++ unsigned int last_residue; ++ long unsigned int last_jiffies; ++ bool auto_poll_rate; ++ unsigned int poll_rate; ++ unsigned int poll_timeout; ++}; ++ ++struct pl011_dmatx_data { ++ struct dma_chan *chan; ++ struct scatterlist sg; ++ char *buf; ++ bool queued; ++}; ++ ++struct uart_amba_port { ++ struct uart_port port; ++ const u16 *reg_offset; ++ struct clk *clk; ++ const struct vendor_data *vendor; ++ unsigned int dmacr; ++ unsigned int im; ++ unsigned int old_status; ++ unsigned int fifosize; ++ unsigned int old_cr; ++ unsigned int fixed_baud; ++ char type[12]; ++ bool using_tx_dma; ++ bool using_rx_dma; ++ struct pl011_dmarx_data dmarx; ++ struct pl011_dmatx_data dmatx; ++ bool dma_probed; ++}; ++ ++struct memdev { ++ const char *name; ++ umode_t mode; ++ const struct file_operations *fops; ++ fmode_t fmode; ++}; ++ ++typedef long unsigned int cycles_t; ++ ++struct timer_rand_state { ++ cycles_t last_time; ++ long int last_delta; ++ long int last_delta2; ++}; ++ ++struct trace_event_raw_add_device_randomness { ++ struct trace_entry ent; ++ int bytes; ++ long unsigned int IP; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_random__mix_pool_bytes { ++ struct trace_entry ent; ++ const char *pool_name; ++ int bytes; ++ long unsigned int IP; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_credit_entropy_bits { ++ struct trace_entry ent; ++ const char *pool_name; ++ int bits; ++ int entropy_count; ++ int entropy_total; ++ long unsigned int IP; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_push_to_pool { ++ struct trace_entry ent; ++ const char *pool_name; ++ int pool_bits; ++ int input_bits; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_debit_entropy { ++ struct trace_entry ent; ++ const char *pool_name; ++ int debit_bits; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_add_input_randomness { ++ struct trace_entry ent; ++ int input_bits; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_add_disk_randomness { ++ struct trace_entry ent; ++ dev_t dev; ++ int input_bits; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xfer_secondary_pool { ++ struct trace_entry ent; ++ const char *pool_name; ++ int xfer_bits; ++ int request_bits; ++ int pool_entropy; ++ int input_entropy; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_random__get_random_bytes { ++ struct trace_entry ent; ++ int nbytes; ++ long unsigned 
int IP; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_random__extract_entropy { ++ struct trace_entry ent; ++ const char *pool_name; ++ int nbytes; ++ int entropy_count; ++ long unsigned int IP; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_random_read { ++ struct trace_entry ent; ++ int got_bits; ++ int need_bits; ++ int pool_left; ++ int input_left; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_urandom_read { ++ struct trace_entry ent; ++ int got_bits; ++ int pool_left; ++ int input_left; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_add_device_randomness {}; ++ ++struct trace_event_data_offsets_random__mix_pool_bytes {}; ++ ++struct trace_event_data_offsets_credit_entropy_bits {}; ++ ++struct trace_event_data_offsets_push_to_pool {}; ++ ++struct trace_event_data_offsets_debit_entropy {}; ++ ++struct trace_event_data_offsets_add_input_randomness {}; ++ ++struct trace_event_data_offsets_add_disk_randomness {}; ++ ++struct trace_event_data_offsets_xfer_secondary_pool {}; ++ ++struct trace_event_data_offsets_random__get_random_bytes {}; ++ ++struct trace_event_data_offsets_random__extract_entropy {}; ++ ++struct trace_event_data_offsets_random_read {}; ++ ++struct trace_event_data_offsets_urandom_read {}; ++ ++struct poolinfo { ++ int poolbitshift; ++ int poolwords; ++ int poolbytes; ++ int poolbits; ++ int poolfracbits; ++ int tap1; ++ int tap2; ++ int tap3; ++ int tap4; ++ int tap5; ++}; ++ ++struct crng_state { ++ __u32 state[16]; ++ long unsigned int init_time; ++ spinlock_t lock; ++}; ++ ++struct entropy_store { ++ const struct poolinfo *poolinfo; ++ __u32 *pool; ++ const char *name; ++ struct entropy_store *pull; ++ struct work_struct push_work; ++ long unsigned int last_pulled; ++ spinlock_t lock; ++ short unsigned int add_ptr; ++ short unsigned int input_rotate; ++ int entropy_count; ++ int entropy_total; ++ unsigned int initialized: 1; ++ unsigned int last_data_init: 1; ++ __u8 last_data[10]; ++}; ++ ++struct fast_pool { ++ __u32 pool[4]; ++ long unsigned int last; ++ short unsigned int reg_idx; ++ unsigned char count; ++}; ++ ++struct batched_entropy { ++ union { ++ u64 entropy_u64[8]; ++ u32 entropy_u32[16]; ++ }; ++ unsigned int position; ++ spinlock_t batch_lock; ++}; ++ ++struct raw_config_request { ++ int raw_minor; ++ __u64 block_major; ++ __u64 block_minor; ++}; ++ ++struct raw_device_data { ++ struct block_device *binding; ++ int inuse; ++}; ++ ++struct raw32_config_request { ++ compat_int_t raw_minor; ++ compat_u64 block_major; ++ compat_u64 block_minor; ++}; ++ ++struct hwrng { ++ const char *name; ++ int (*init)(struct hwrng *); ++ void (*cleanup)(struct hwrng *); ++ int (*data_present)(struct hwrng *, int); ++ int (*data_read)(struct hwrng *, u32 *); ++ int (*read)(struct hwrng *, void *, size_t, bool); ++ long unsigned int priv; ++ short unsigned int quality; ++ struct list_head list; ++ struct kref ref; ++ struct completion cleanup_done; ++}; ++ ++struct hisi_rng { ++ void *base; ++ struct hwrng rng; ++}; ++ ++struct xgene_rng_dev { ++ u32 irq; ++ void *csr_base; ++ u32 revision; ++ u32 datum_size; ++ u32 failure_cnt; ++ long unsigned int failure_ts; ++ struct timer_list failure_timer; ++ struct device *dev; ++ struct clk *clk; ++}; ++ ++struct cavium_rng_pf { ++ void *control_status; ++}; ++ ++struct cavium_rng { ++ struct hwrng ops; ++ void *result; ++}; ++ ++struct core_device { ++ struct device dev; ++ struct iommu_group *group; ++ struct iommu_domain *domain; ++ u8 smmu_bypass; ++ struct list_head entry; ++}; ++ ++struct 
svm_device { ++ long long unsigned int id; ++ struct miscdevice miscdev; ++ struct device *dev; ++ phys_addr_t l2buff; ++ long unsigned int l2size; ++}; ++ ++struct svm_bind_process { ++ pid_t vpid; ++ u64 ttbr; ++ u64 tcr; ++ int pasid; ++ u32 flags; ++}; ++ ++struct svm_process { ++ struct pid *pid; ++ struct mm_struct *mm; ++ long unsigned int asid; ++ struct rb_node rb_node; ++ struct mmu_notifier notifier; ++ struct callback_head rcu; ++ int pasid; ++ struct mutex mutex; ++ struct rb_root sdma_list; ++ struct svm_device *sdev; ++}; ++ ++struct svm_sdma { ++ struct rb_node node; ++ long unsigned int addr; ++ int nr_pages; ++ struct page **pages; ++ atomic64_t ref; ++}; ++ ++struct svm_proc_mem { ++ u32 dev_id; ++ u32 len; ++ u64 pid; ++ u64 vaddr; ++ u64 buf; ++}; ++ ++struct meminfo { ++ long unsigned int hugetlbfree; ++ long unsigned int hugetlbtotal; ++}; ++ ++struct iommu_group { ++ struct kobject kobj; ++ struct kobject *devices_kobj; ++ struct list_head devices; ++ struct mutex mutex; ++ struct blocking_notifier_head notifier; ++ void *iommu_data; ++ void (*iommu_data_release)(void *); ++ char *name; ++ int id; ++ struct iommu_domain *default_domain; ++ struct iommu_domain *domain; ++ atomic_t domain_shared_ref; ++}; ++ ++struct iommu_device { ++ struct list_head list; ++ const struct iommu_ops *ops; ++ struct fwnode_handle *fwnode; ++ struct device *dev; ++}; ++ ++struct iommu_callback_data { ++ const struct iommu_ops *ops; ++}; ++ ++struct group_device { ++ struct list_head list; ++ struct device *dev; ++ char *name; ++}; ++ ++struct iommu_group_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct iommu_group *, char *); ++ ssize_t (*store)(struct iommu_group *, const char *, size_t); ++}; ++ ++struct group_for_pci_data { ++ struct pci_dev *pdev; ++ struct iommu_group *group; ++}; ++ ++struct trace_event_raw_iommu_group_event { ++ struct trace_entry ent; ++ int gid; ++ u32 __data_loc_device; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_iommu_device_event { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_map { ++ struct trace_entry ent; ++ u64 iova; ++ u64 paddr; ++ size_t size; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_unmap { ++ struct trace_entry ent; ++ u64 iova; ++ size_t size; ++ size_t unmapped_size; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_iommu_error { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ u32 __data_loc_driver; ++ u64 iova; ++ int flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dev_fault { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ int type; ++ int reason; ++ u64 addr; ++ u32 pasid; ++ u32 pgid; ++ u32 last_req; ++ u32 prot; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_dev_page_response { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ int code; ++ u64 addr; ++ u32 pasid; ++ u32 pgid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sva_invalidate { ++ struct trace_entry ent; ++ u32 __data_loc_device; ++ int type; ++ u32 granu; ++ u32 flags; ++ u8 size; ++ u32 pasid; ++ u64 addr; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_iommu_group_event { ++ u32 device; ++}; ++ ++struct trace_event_data_offsets_iommu_device_event { ++ u32 device; ++}; ++ ++struct trace_event_data_offsets_map {}; ++ ++struct trace_event_data_offsets_unmap {}; ++ ++struct trace_event_data_offsets_iommu_error { ++ u32 device; ++ u32 driver; ++}; ++ ++struct trace_event_data_offsets_dev_fault { ++ u32 device; ++}; ++ ++struct 
trace_event_data_offsets_dev_page_response { ++ u32 device; ++}; ++ ++struct trace_event_data_offsets_sva_invalidate { ++ u32 device; ++}; ++ ++struct iova { ++ struct rb_node node; ++ long unsigned int pfn_hi; ++ long unsigned int pfn_lo; ++}; ++ ++struct iova_magazine; ++ ++struct iova_cpu_rcache; ++ ++struct iova_rcache { ++ spinlock_t lock; ++ long unsigned int depot_size; ++ struct iova_magazine *depot[32]; ++ struct iova_cpu_rcache *cpu_rcaches; ++}; ++ ++struct iova_domain; ++ ++typedef void (*iova_flush_cb)(struct iova_domain *); ++ ++typedef void (*iova_entry_dtor)(long unsigned int); ++ ++struct iova_fq; ++ ++struct iova_domain { ++ spinlock_t iova_rbtree_lock; ++ struct rb_root rbroot; ++ struct rb_node *cached_node; ++ struct rb_node *cached32_node; ++ long unsigned int granule; ++ long unsigned int start_pfn; ++ long unsigned int dma_32bit_pfn; ++ long unsigned int max32_alloc_size; ++ struct iova_fq *fq; ++ atomic64_t fq_flush_start_cnt; ++ atomic64_t fq_flush_finish_cnt; ++ struct iova anchor; ++ struct iova_rcache rcaches[6]; ++ iova_flush_cb flush_cb; ++ iova_entry_dtor entry_dtor; ++ struct timer_list fq_timer; ++ atomic_t fq_timer_on; ++ struct work_struct free_iova_work; ++}; ++ ++struct iova_fq_entry { ++ long unsigned int iova_pfn; ++ long unsigned int pages; ++ long unsigned int data; ++ u64 counter; ++}; ++ ++struct iova_fq { ++ struct iova_fq_entry entries[256]; ++ unsigned int head; ++ unsigned int tail; ++ spinlock_t lock; ++}; ++ ++struct iommu_dma_msi_page { ++ struct list_head list; ++ dma_addr_t iova; ++ phys_addr_t phys; ++}; ++ ++enum iommu_dma_cookie_type { ++ IOMMU_DMA_IOVA_COOKIE = 0, ++ IOMMU_DMA_MSI_COOKIE = 1, ++}; ++ ++struct iommu_dma_cookie { ++ enum iommu_dma_cookie_type type; ++ union { ++ struct iova_domain iovad; ++ dma_addr_t msi_iova; ++ }; ++ struct list_head msi_page_list; ++ spinlock_t msi_lock; ++ struct iommu_domain *fq_domain; ++}; ++ ++struct iommu_bond { ++ struct io_mm *io_mm; ++ struct device *dev; ++ struct iommu_domain *domain; ++ struct list_head mm_head; ++ struct list_head dev_head; ++ struct list_head domain_head; ++ refcount_t refs; ++ struct wait_queue_head mm_exit_wq; ++ bool mm_exit_active; ++ void *drvdata; ++}; ++ ++typedef int (*iopf_queue_flush_t)(void *, struct device *); ++ ++enum page_request_handle_t { ++ IOMMU_PAGE_RESP_HANDLED = 0, ++ IOMMU_PAGE_RESP_CONTINUE = 1, ++}; ++ ++struct iopf_queue; ++ ++struct iopf_device_param { ++ struct iopf_queue *queue; ++ struct list_head partial; ++}; ++ ++struct iopf_queue { ++ struct workqueue_struct *wq; ++ iopf_queue_flush_t flush; ++ void *flush_arg; ++ refcount_t refs; ++}; ++ ++struct iopf_context { ++ struct device *dev; ++ struct iommu_fault_event evt; ++ struct list_head head; ++}; ++ ++struct iopf_group { ++ struct iopf_context last_fault; ++ struct list_head faults; ++ struct work_struct work; ++}; ++ ++enum io_pgtable_fmt { ++ ARM_32_LPAE_S1 = 0, ++ ARM_32_LPAE_S2 = 1, ++ ARM_64_LPAE_S1 = 2, ++ ARM_64_LPAE_S2 = 3, ++ ARM_V7S = 4, ++ IO_PGTABLE_NUM_FMTS = 5, ++}; ++ ++struct iommu_gather_ops { ++ void (*tlb_flush_all)(void *); ++ void (*tlb_add_flush)(long unsigned int, size_t, size_t, bool, void *); ++ void (*tlb_sync)(void *); ++}; ++ ++struct io_pgtable_cfg { ++ long unsigned int quirks; ++ long unsigned int pgsize_bitmap; ++ unsigned int ias; ++ unsigned int oas; ++ const struct iommu_gather_ops *tlb; ++ struct device *iommu_dev; ++ union { ++ struct { ++ u64 ttbr[2]; ++ u64 tcr; ++ u64 mair[2]; ++ } arm_lpae_s1_cfg; ++ struct { ++ u64 vttbr; ++ u64 vtcr; ++ } 
arm_lpae_s2_cfg; ++ struct { ++ u32 ttbr[2]; ++ u32 tcr; ++ u32 nmrr; ++ u32 prrr; ++ } arm_v7s_cfg; ++ }; ++}; ++ ++struct io_pgtable_ops { ++ int (*map)(struct io_pgtable_ops *, long unsigned int, phys_addr_t, size_t, int); ++ size_t (*unmap)(struct io_pgtable_ops *, long unsigned int, size_t); ++ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *, long unsigned int); ++}; ++ ++struct io_pgtable { ++ enum io_pgtable_fmt fmt; ++ void *cookie; ++ struct io_pgtable_cfg cfg; ++ struct io_pgtable_ops ops; ++}; ++ ++struct io_pgtable_init_fns { ++ struct io_pgtable * (*alloc)(struct io_pgtable_cfg *, void *); ++ void (*free)(struct io_pgtable *); ++}; ++ ++struct arm_lpae_io_pgtable { ++ struct io_pgtable iop; ++ int levels; ++ size_t pgd_size; ++ long unsigned int pg_shift; ++ long unsigned int bits_per_level; ++ void *pgd; ++}; ++ ++typedef u64 arm_lpae_iopte; ++ ++enum iommu_pasid_table_fmt { ++ PASID_TABLE_ARM_SMMU_V3 = 0, ++ PASID_TABLE_NUM_FMTS = 1, ++}; ++ ++struct iommu_pasid_entry { ++ u64 tag; ++ void (*release)(struct iommu_pasid_entry *); ++}; ++ ++struct iommu_pasid_table_ops { ++ struct iommu_pasid_entry * (*alloc_shared_entry)(struct iommu_pasid_table_ops *, struct mm_struct *); ++ struct iommu_pasid_entry * (*alloc_priv_entry)(struct iommu_pasid_table_ops *, enum io_pgtable_fmt, struct io_pgtable_cfg *); ++ int (*set_entry)(struct iommu_pasid_table_ops *, int, struct iommu_pasid_entry *); ++ void (*clear_entry)(struct iommu_pasid_table_ops *, int, struct iommu_pasid_entry *); ++}; ++ ++struct iommu_pasid_sync_ops { ++ void (*cfg_flush)(void *, int, bool); ++ void (*cfg_flush_all)(void *); ++ void (*tlb_flush)(void *, int, struct iommu_pasid_entry *); ++}; ++ ++struct arm_smmu_context_cfg { ++ u8 stall: 1; ++ u8 asid_bits; ++ u8 hw_dirty: 1; ++ u8 hw_access: 1; ++ u8 s1fmt; ++}; ++ ++struct iommu_pasid_table_cfg { ++ struct device *iommu_dev; ++ size_t order; ++ const struct iommu_pasid_sync_ops *sync; ++ dma_addr_t base; ++ union { ++ struct arm_smmu_context_cfg arm_smmu; ++ }; ++}; ++ ++struct iommu_pasid_table { ++ enum iommu_pasid_table_fmt fmt; ++ void *cookie; ++ struct iommu_pasid_table_cfg cfg; ++ struct iommu_pasid_table_ops ops; ++}; ++ ++struct iommu_pasid_init_fns { ++ struct iommu_pasid_table * (*alloc)(struct iommu_pasid_table_cfg *, void *); ++ void (*free)(struct iommu_pasid_table *); ++}; ++ ++struct arm_smmu_cd_tables; ++ ++struct arm_smmu_cd { ++ struct iommu_pasid_entry entry; ++ u64 ttbr; ++ u64 tcr; ++ u64 mair; ++ int pasid; ++ refcount_t refs; ++ unsigned int users; ++ struct mm_struct *mm; ++ struct arm_smmu_cd_tables *tbl; ++}; ++ ++struct arm_smmu_cd_table { ++ __le64 *ptr; ++ dma_addr_t ptr_dma; ++}; ++ ++struct arm_smmu_cd_tables { ++ struct iommu_pasid_table pasid; ++ bool linear; ++ union { ++ struct arm_smmu_cd_table table; ++ struct { ++ __le64 *ptr; ++ dma_addr_t ptr_dma; ++ size_t num_entries; ++ struct arm_smmu_cd_table *tables; ++ } l1; ++ }; ++}; ++ ++struct iova_magazine { ++ long unsigned int size; ++ long unsigned int pfns[128]; ++}; ++ ++struct iova_cpu_rcache { ++ spinlock_t lock; ++ struct iova_magazine *loaded; ++ struct iova_magazine *prev; ++}; ++ ++struct of_pci_iommu_alias_info { ++ struct device *dev; ++ struct device_node *np; ++}; ++ ++struct of_phandle_iterator { ++ const char *cells_name; ++ int cell_count; ++ const struct device_node *parent; ++ const __be32 *list_end; ++ const __be32 *phandle_end; ++ const __be32 *cur; ++ uint32_t cur_count; ++ phandle phandle; ++ struct device_node *node; ++}; ++ ++enum arm_smmu_s2cr_type 
{ ++ S2CR_TYPE_TRANS = 0, ++ S2CR_TYPE_BYPASS = 1, ++ S2CR_TYPE_FAULT = 2, ++}; ++ ++enum arm_smmu_s2cr_privcfg { ++ S2CR_PRIVCFG_DEFAULT = 0, ++ S2CR_PRIVCFG_DIPAN = 1, ++ S2CR_PRIVCFG_UNPRIV = 2, ++ S2CR_PRIVCFG_PRIV = 3, ++}; ++ ++enum arm_smmu_arch_version { ++ ARM_SMMU_V1 = 0, ++ ARM_SMMU_V1_64K = 1, ++ ARM_SMMU_V2 = 2, ++}; ++ ++enum arm_smmu_implementation { ++ GENERIC_SMMU = 0, ++ ARM_MMU500 = 1, ++ CAVIUM_SMMUV2 = 2, ++ QCOM_SMMUV2 = 3, ++}; ++ ++struct arm_smmu_s2cr { ++ struct iommu_group *group; ++ int count; ++ enum arm_smmu_s2cr_type type; ++ enum arm_smmu_s2cr_privcfg privcfg; ++ u8 cbndx; ++}; ++ ++struct arm_smmu_smr { ++ u16 mask; ++ u16 id; ++ bool valid; ++}; ++ ++struct arm_smmu_cfg; ++ ++struct arm_smmu_cb { ++ u64 ttbr[2]; ++ u32 tcr[2]; ++ u32 mair[2]; ++ struct arm_smmu_cfg *cfg; ++}; ++ ++enum arm_smmu_context_fmt { ++ ARM_SMMU_CTX_FMT_NONE = 0, ++ ARM_SMMU_CTX_FMT_AARCH64 = 1, ++ ARM_SMMU_CTX_FMT_AARCH32_L = 2, ++ ARM_SMMU_CTX_FMT_AARCH32_S = 3, ++}; ++ ++struct arm_smmu_cfg { ++ u8 cbndx; ++ u8 irptndx; ++ union { ++ u16 asid; ++ u16 vmid; ++ }; ++ u32 cbar; ++ enum arm_smmu_context_fmt fmt; ++}; ++ ++struct arm_smmu_device; ++ ++struct arm_smmu_master_cfg { ++ struct arm_smmu_device *smmu; ++ s16 smendx[0]; ++}; ++ ++struct arm_smmu_device { ++ struct device *dev; ++ void *base; ++ void *cb_base; ++ long unsigned int pgshift; ++ u32 features; ++ u32 options; ++ enum arm_smmu_arch_version version; ++ enum arm_smmu_implementation model; ++ u32 num_context_banks; ++ u32 num_s2_context_banks; ++ long unsigned int context_map[2]; ++ struct arm_smmu_cb *cbs; ++ atomic_t irptndx; ++ u32 num_mapping_groups; ++ u16 streamid_mask; ++ u16 smr_mask_mask; ++ struct arm_smmu_smr *smrs; ++ struct arm_smmu_s2cr *s2crs; ++ struct mutex stream_map_mutex; ++ long unsigned int va_size; ++ long unsigned int ipa_size; ++ long unsigned int pa_size; ++ long unsigned int pgsize_bitmap; ++ u32 num_global_irqs; ++ u32 num_context_irqs; ++ unsigned int *irqs; ++ u32 cavium_id_base; ++ spinlock_t global_sync_lock; ++ struct iommu_device iommu; ++}; ++ ++enum arm_smmu_domain_stage { ++ ARM_SMMU_DOMAIN_S1 = 0, ++ ARM_SMMU_DOMAIN_S2 = 1, ++ ARM_SMMU_DOMAIN_NESTED = 2, ++ ARM_SMMU_DOMAIN_BYPASS = 3, ++}; ++ ++struct arm_smmu_domain { ++ struct arm_smmu_device *smmu; ++ struct io_pgtable_ops *pgtbl_ops; ++ const struct iommu_gather_ops *tlb_ops; ++ struct arm_smmu_cfg cfg; ++ enum arm_smmu_domain_stage stage; ++ bool non_strict; ++ struct mutex init_mutex; ++ spinlock_t cb_lock; ++ struct iommu_domain domain; ++}; ++ ++struct arm_smmu_option_prop { ++ u32 opt; ++ const char *prop; ++}; ++ ++struct arm_smmu_match_data { ++ enum arm_smmu_arch_version version; ++ enum arm_smmu_implementation model; ++}; ++ ++struct smmu_bypass_device { ++ short unsigned int vendor; ++ short unsigned int device; ++}; ++ ++enum pri_resp { ++ PRI_RESP_DENY = 0, ++ PRI_RESP_FAIL = 1, ++ PRI_RESP_SUCC = 2, ++}; ++ ++enum arm_smmu_msi_index { ++ EVTQ_MSI_INDEX = 0, ++ GERROR_MSI_INDEX = 1, ++ PRIQ_MSI_INDEX = 2, ++ ARM_SMMU_MAX_MSIS = 3, ++}; ++ ++struct arm_smmu_cmdq_ent { ++ u8 opcode; ++ bool substream_valid; ++ union { ++ struct { ++ u32 sid; ++ u8 size; ++ u64 addr; ++ } prefetch; ++ struct { ++ u32 sid; ++ u32 ssid; ++ union { ++ bool leaf; ++ u8 span; ++ }; ++ } cfgi; ++ struct { ++ u16 asid; ++ u16 vmid; ++ bool leaf; ++ u64 addr; ++ } tlbi; ++ struct { ++ u32 sid; ++ u32 ssid; ++ u16 grpid; ++ enum pri_resp resp; ++ } pri; ++ struct { ++ u32 sid; ++ u16 stag; ++ enum page_response_code resp; ++ } resume; ++ 
struct { ++ u32 msidata; ++ u64 msiaddr; ++ } sync; ++ }; ++}; ++ ++struct arm_smmu_queue { ++ int irq; ++ __le64 *base; ++ dma_addr_t base_dma; ++ u64 q_base; ++ size_t ent_dwords; ++ u32 max_n_shift; ++ u32 prod; ++ u32 cons; ++ u32 *prod_reg; ++ u32 *cons_reg; ++ u64 batch; ++ wait_queue_head_t wq; ++}; ++ ++struct arm_smmu_cmdq { ++ struct arm_smmu_queue q; ++ spinlock_t lock; ++}; ++ ++struct arm_smmu_evtq { ++ struct arm_smmu_queue q; ++ u32 max_stalls; ++}; ++ ++struct arm_smmu_priq { ++ struct arm_smmu_queue q; ++}; ++ ++struct arm_smmu_strtab_l1_desc { ++ u8 span; ++ __le64 *l2ptr; ++ dma_addr_t l2ptr_dma; ++}; ++ ++struct arm_smmu_s1_cfg { ++ struct iommu_pasid_table_cfg tables; ++ struct iommu_pasid_table_ops *ops; ++ struct iommu_pasid_entry *cd0; ++}; ++ ++struct arm_smmu_s2_cfg { ++ u16 vmid; ++ u64 vttbr; ++ u64 vtcr; ++}; ++ ++struct arm_smmu_strtab_ent { ++ bool assigned; ++ struct arm_smmu_s1_cfg *s1_cfg; ++ struct arm_smmu_s2_cfg *s2_cfg; ++ bool can_stall; ++}; ++ ++struct arm_smmu_strtab_cfg { ++ __le64 *strtab; ++ dma_addr_t strtab_dma; ++ struct arm_smmu_strtab_l1_desc *l1_desc; ++ unsigned int num_l1_ents; ++ u64 strtab_base; ++ u32 strtab_base_cfg; ++}; ++ ++struct iopf_queue___2; ++ ++struct arm_smmu_device___2 { ++ struct device *dev; ++ void *base; ++ u32 features; ++ u32 options; ++ u64 spi_base; ++ struct arm_smmu_cmdq cmdq; ++ struct arm_smmu_evtq evtq; ++ struct arm_smmu_priq priq; ++ int gerr_irq; ++ int combined_irq; ++ u32 sync_nr; ++ u8 prev_cmd_opcode; ++ long unsigned int ias; ++ long unsigned int oas; ++ long unsigned int pgsize_bitmap; ++ unsigned int asid_bits; ++ unsigned int vmid_bits; ++ long unsigned int vmid_map[1024]; ++ unsigned int ssid_bits; ++ unsigned int sid_bits; ++ struct arm_smmu_strtab_cfg strtab_cfg; ++ union { ++ u32 sync_count; ++ u64 padding; ++ }; ++ struct iommu_device iommu; ++ struct rb_root streams; ++ struct mutex streams_mutex; ++ struct iopf_queue___2 *iopf_queue; ++}; ++ ++struct arm_smmu_master_data; ++ ++struct arm_smmu_stream { ++ u32 id; ++ struct arm_smmu_master_data *master; ++ struct rb_node node; ++}; ++ ++struct arm_smmu_domain___2; ++ ++struct arm_smmu_master_data { ++ struct arm_smmu_device___2 *smmu; ++ struct arm_smmu_strtab_ent ste; ++ struct arm_smmu_domain___2 *domain; ++ struct list_head list; ++ struct arm_smmu_stream *streams; ++ struct device *dev; ++ size_t ssid_bits; ++ bool can_fault; ++}; ++ ++struct arm_smmu_domain___2 { ++ struct arm_smmu_device___2 *smmu; ++ struct mutex init_mutex; ++ struct io_pgtable_ops *pgtbl_ops; ++ bool non_strict; ++ enum arm_smmu_domain_stage stage; ++ union { ++ struct arm_smmu_s1_cfg s1_cfg; ++ struct arm_smmu_s2_cfg s2_cfg; ++ }; ++ struct iommu_domain domain; ++ struct list_head devices; ++ spinlock_t devices_lock; ++}; ++ ++struct arm_smmu_mm { ++ struct io_mm io_mm; ++ struct iommu_pasid_entry *cd; ++}; ++ ++enum chipset_type { ++ NOT_SUPPORTED = 0, ++ SUPPORTED = 1, ++}; ++ ++struct agp_version { ++ u16 major; ++ u16 minor; ++}; ++ ++struct agp_kern_info { ++ struct agp_version version; ++ struct pci_dev *device; ++ enum chipset_type chipset; ++ long unsigned int mode; ++ long unsigned int aper_base; ++ size_t aper_size; ++ int max_memory; ++ int current_memory; ++ bool cant_use_aperture; ++ long unsigned int page_mask; ++ const struct vm_operations_struct *vm_ops; ++}; ++ ++struct agp_bridge_data; ++ ++struct dma_fence_ops; ++ ++struct dma_fence { ++ struct kref refcount; ++ const struct dma_fence_ops *ops; ++ struct callback_head rcu; ++ struct list_head 
cb_list; ++ spinlock_t *lock; ++ u64 context; ++ unsigned int seqno; ++ long unsigned int flags; ++ ktime_t timestamp; ++ int error; ++}; ++ ++struct dma_fence_ops { ++ const char * (*get_driver_name)(struct dma_fence *); ++ const char * (*get_timeline_name)(struct dma_fence *); ++ bool (*enable_signaling)(struct dma_fence *); ++ bool (*signaled)(struct dma_fence *); ++ long int (*wait)(struct dma_fence *, bool, long int); ++ void (*release)(struct dma_fence *); ++ void (*fence_value_str)(struct dma_fence *, char *, int); ++ void (*timeline_value_str)(struct dma_fence *, char *, int); ++}; ++ ++typedef unsigned int drm_magic_t; ++ ++struct drm_clip_rect { ++ short unsigned int x1; ++ short unsigned int y1; ++ short unsigned int x2; ++ short unsigned int y2; ++}; ++ ++struct drm_hw_lock { ++ volatile unsigned int lock; ++ char padding[60]; ++}; ++ ++enum drm_mode_subconnector { ++ DRM_MODE_SUBCONNECTOR_Automatic = 0, ++ DRM_MODE_SUBCONNECTOR_Unknown = 0, ++ DRM_MODE_SUBCONNECTOR_DVID = 3, ++ DRM_MODE_SUBCONNECTOR_DVIA = 4, ++ DRM_MODE_SUBCONNECTOR_Composite = 5, ++ DRM_MODE_SUBCONNECTOR_SVIDEO = 6, ++ DRM_MODE_SUBCONNECTOR_Component = 8, ++ DRM_MODE_SUBCONNECTOR_SCART = 9, ++}; ++ ++struct drm_mode_fb_cmd2 { ++ __u32 fb_id; ++ __u32 width; ++ __u32 height; ++ __u32 pixel_format; ++ __u32 flags; ++ __u32 handles[4]; ++ __u32 pitches[4]; ++ __u32 offsets[4]; ++ __u64 modifier[4]; ++}; ++ ++struct drm_mode_create_dumb { ++ __u32 height; ++ __u32 width; ++ __u32 bpp; ++ __u32 flags; ++ __u32 handle; ++ __u32 pitch; ++ __u64 size; ++}; ++ ++struct drm_event { ++ __u32 type; ++ __u32 length; ++}; ++ ++struct drm_event_vblank { ++ struct drm_event base; ++ __u64 user_data; ++ __u32 tv_sec; ++ __u32 tv_usec; ++ __u32 sequence; ++ __u32 crtc_id; ++}; ++ ++struct drm_event_crtc_sequence { ++ struct drm_event base; ++ __u64 user_data; ++ __s64 time_ns; ++ __u64 sequence; ++}; ++ ++struct drm_agp_head { ++ struct agp_kern_info agp_info; ++ struct list_head memory; ++ long unsigned int mode; ++ struct agp_bridge_data *bridge; ++ int enabled; ++ int acquired; ++ long unsigned int base; ++ int agp_mtrr; ++ int cant_use_aperture; ++ long unsigned int page_mask; ++}; ++ ++struct drm_modeset_lock; ++ ++struct drm_modeset_acquire_ctx { ++ struct ww_acquire_ctx ww_ctx; ++ struct drm_modeset_lock *contended; ++ struct list_head locked; ++ bool trylock_only; ++ bool interruptible; ++}; ++ ++struct drm_modeset_lock { ++ struct ww_mutex mutex; ++ struct list_head head; ++}; ++ ++struct drm_rect { ++ int x1; ++ int y1; ++ int x2; ++ int y2; ++}; ++ ++struct drm_object_properties; ++ ++struct drm_mode_object { ++ uint32_t id; ++ uint32_t type; ++ struct drm_object_properties *properties; ++ struct kref refcount; ++ void (*free_cb)(struct kref *); ++}; ++ ++struct drm_property; ++ ++struct drm_object_properties { ++ int count; ++ struct drm_property *properties[24]; ++ uint64_t values[24]; ++}; ++ ++struct drm_device; ++ ++struct drm_property { ++ struct list_head head; ++ struct drm_mode_object base; ++ uint32_t flags; ++ char name[32]; ++ uint32_t num_values; ++ uint64_t *values; ++ struct drm_device *dev; ++ struct list_head enum_list; ++}; ++ ++struct drm_framebuffer; ++ ++struct drm_file; ++ ++struct drm_framebuffer_funcs { ++ void (*destroy)(struct drm_framebuffer *); ++ int (*create_handle)(struct drm_framebuffer *, struct drm_file *, unsigned int *); ++ int (*dirty)(struct drm_framebuffer *, struct drm_file *, unsigned int, unsigned int, struct drm_clip_rect *, unsigned int); ++}; ++ ++struct drm_format_info; 
++ ++struct drm_gem_object; ++ ++struct drm_framebuffer { ++ struct drm_device *dev; ++ struct list_head head; ++ struct drm_mode_object base; ++ char comm[16]; ++ const struct drm_format_info *format; ++ const struct drm_framebuffer_funcs *funcs; ++ unsigned int pitches[4]; ++ unsigned int offsets[4]; ++ uint64_t modifier; ++ unsigned int width; ++ unsigned int height; ++ int flags; ++ int hot_x; ++ int hot_y; ++ struct list_head filp_head; ++ struct drm_gem_object *obj[4]; ++}; ++ ++struct drm_prime_file_private { ++ struct mutex lock; ++ struct rb_root dmabufs; ++ struct rb_root handles; ++}; ++ ++struct drm_master; ++ ++struct drm_minor; ++ ++struct drm_file { ++ unsigned int authenticated: 1; ++ unsigned int stereo_allowed: 1; ++ unsigned int universal_planes: 1; ++ unsigned int atomic: 1; ++ unsigned int aspect_ratio_allowed: 1; ++ unsigned int writeback_connectors: 1; ++ unsigned int is_master: 1; ++ struct drm_master *master; ++ struct pid *pid; ++ drm_magic_t magic; ++ struct list_head lhead; ++ struct drm_minor *minor; ++ struct idr object_idr; ++ spinlock_t table_lock; ++ struct idr syncobj_idr; ++ spinlock_t syncobj_table_lock; ++ struct file *filp; ++ void *driver_priv; ++ struct list_head fbs; ++ struct mutex fbs_lock; ++ struct list_head blobs; ++ wait_queue_head_t event_wait; ++ struct list_head pending_event_list; ++ struct list_head event_list; ++ int event_space; ++ struct mutex event_read_lock; ++ struct drm_prime_file_private prime; ++ long unsigned int lock_count; ++}; ++ ++struct drm_open_hash { ++ struct hlist_head *table; ++ u8 order; ++}; ++ ++struct drm_mode_config_funcs; ++ ++struct drm_atomic_state; ++ ++struct drm_mode_config_helper_funcs; ++ ++struct drm_mode_config { ++ struct mutex mutex; ++ struct drm_modeset_lock connection_mutex; ++ struct drm_modeset_acquire_ctx *acquire_ctx; ++ struct mutex idr_mutex; ++ struct idr crtc_idr; ++ struct idr tile_idr; ++ struct mutex fb_lock; ++ int num_fb; ++ struct list_head fb_list; ++ spinlock_t connector_list_lock; ++ int num_connector; ++ struct ida connector_ida; ++ struct list_head connector_list; ++ struct llist_head connector_free_list; ++ struct work_struct connector_free_work; ++ int num_encoder; ++ struct list_head encoder_list; ++ int num_total_plane; ++ struct list_head plane_list; ++ int num_crtc; ++ struct list_head crtc_list; ++ struct list_head property_list; ++ int min_width; ++ int min_height; ++ int max_width; ++ int max_height; ++ const struct drm_mode_config_funcs *funcs; ++ resource_size_t fb_base; ++ bool poll_enabled; ++ bool poll_running; ++ bool delayed_event; ++ struct delayed_work output_poll_work; ++ struct mutex blob_lock; ++ struct list_head property_blob_list; ++ struct drm_property *edid_property; ++ struct drm_property *dpms_property; ++ struct drm_property *path_property; ++ struct drm_property *tile_property; ++ struct drm_property *link_status_property; ++ struct drm_property *plane_type_property; ++ struct drm_property *prop_src_x; ++ struct drm_property *prop_src_y; ++ struct drm_property *prop_src_w; ++ struct drm_property *prop_src_h; ++ struct drm_property *prop_crtc_x; ++ struct drm_property *prop_crtc_y; ++ struct drm_property *prop_crtc_w; ++ struct drm_property *prop_crtc_h; ++ struct drm_property *prop_fb_id; ++ struct drm_property *prop_in_fence_fd; ++ struct drm_property *prop_out_fence_ptr; ++ struct drm_property *prop_crtc_id; ++ struct drm_property *prop_active; ++ struct drm_property *prop_mode_id; ++ struct drm_property *dvi_i_subconnector_property; ++ struct 
drm_property *dvi_i_select_subconnector_property; ++ struct drm_property *tv_subconnector_property; ++ struct drm_property *tv_select_subconnector_property; ++ struct drm_property *tv_mode_property; ++ struct drm_property *tv_left_margin_property; ++ struct drm_property *tv_right_margin_property; ++ struct drm_property *tv_top_margin_property; ++ struct drm_property *tv_bottom_margin_property; ++ struct drm_property *tv_brightness_property; ++ struct drm_property *tv_contrast_property; ++ struct drm_property *tv_flicker_reduction_property; ++ struct drm_property *tv_overscan_property; ++ struct drm_property *tv_saturation_property; ++ struct drm_property *tv_hue_property; ++ struct drm_property *scaling_mode_property; ++ struct drm_property *aspect_ratio_property; ++ struct drm_property *content_type_property; ++ struct drm_property *degamma_lut_property; ++ struct drm_property *degamma_lut_size_property; ++ struct drm_property *ctm_property; ++ struct drm_property *gamma_lut_property; ++ struct drm_property *gamma_lut_size_property; ++ struct drm_property *suggested_x_property; ++ struct drm_property *suggested_y_property; ++ struct drm_property *non_desktop_property; ++ struct drm_property *panel_orientation_property; ++ struct drm_property *writeback_fb_id_property; ++ struct drm_property *writeback_pixel_formats_property; ++ struct drm_property *writeback_out_fence_ptr_property; ++ uint32_t preferred_depth; ++ uint32_t prefer_shadow; ++ bool async_page_flip; ++ bool allow_fb_modifiers; ++ bool normalize_zpos; ++ struct drm_property *modifiers_property; ++ uint32_t cursor_width; ++ uint32_t cursor_height; ++ struct drm_atomic_state *suspend_state; ++ const struct drm_mode_config_helper_funcs *helper_private; ++}; ++ ++struct drm_driver; ++ ++struct drm_device_dma; ++ ++struct drm_vblank_crtc; ++ ++struct drm_sg_mem; ++ ++struct drm_local_map; ++ ++struct drm_vma_offset_manager; ++ ++struct drm_fb_helper; ++ ++struct drm_device { ++ struct list_head legacy_dev_list; ++ int if_version; ++ struct kref ref; ++ struct device *dev; ++ struct drm_driver *driver; ++ void *dev_private; ++ struct drm_minor *primary; ++ struct drm_minor *render; ++ bool registered; ++ struct drm_master *master; ++ bool unplugged; ++ struct inode *anon_inode; ++ char *unique; ++ struct mutex struct_mutex; ++ struct mutex master_mutex; ++ int open_count; ++ spinlock_t buf_lock; ++ int buf_use; ++ atomic_t buf_alloc; ++ struct mutex filelist_mutex; ++ struct list_head filelist; ++ struct list_head filelist_internal; ++ struct mutex clientlist_mutex; ++ struct list_head clientlist; ++ struct list_head maplist; ++ struct drm_open_hash map_hash; ++ struct list_head ctxlist; ++ struct mutex ctxlist_mutex; ++ struct idr ctx_idr; ++ struct list_head vmalist; ++ struct drm_device_dma *dma; ++ volatile long int context_flag; ++ int last_context; ++ bool irq_enabled; ++ int irq; ++ bool vblank_disable_immediate; ++ struct drm_vblank_crtc *vblank; ++ spinlock_t vblank_time_lock; ++ spinlock_t vbl_lock; ++ u32 max_vblank_count; ++ struct list_head vblank_event_list; ++ spinlock_t event_lock; ++ struct drm_agp_head *agp; ++ struct pci_dev *pdev; ++ struct drm_sg_mem *sg; ++ unsigned int num_crtcs; ++ struct { ++ int context; ++ struct drm_hw_lock *lock; ++ } sigdata; ++ struct drm_local_map *agp_buffer_map; ++ unsigned int agp_buffer_token; ++ struct drm_mode_config mode_config; ++ struct mutex object_name_lock; ++ struct idr object_name_idr; ++ struct drm_vma_offset_manager *vma_offset_manager; ++ int switch_power_state; ++ 
struct drm_fb_helper *fb_helper; ++}; ++ ++struct drm_format_info { ++ u32 format; ++ u8 depth; ++ u8 num_planes; ++ u8 cpp[3]; ++ u8 hsub; ++ u8 vsub; ++ bool has_alpha; ++ bool is_yuv; ++}; ++ ++struct drm_mm; ++ ++struct drm_mm_node { ++ long unsigned int color; ++ u64 start; ++ u64 size; ++ struct drm_mm *mm; ++ struct list_head node_list; ++ struct list_head hole_stack; ++ struct rb_node rb; ++ struct rb_node rb_hole_size; ++ struct rb_node rb_hole_addr; ++ u64 __subtree_last; ++ u64 hole_size; ++ bool allocated: 1; ++ bool scanned_block: 1; ++}; ++ ++struct drm_vma_offset_node { ++ rwlock_t vm_lock; ++ struct drm_mm_node vm_node; ++ struct rb_root vm_files; ++ bool readonly: 1; ++}; ++ ++struct dma_buf; ++ ++struct dma_buf_attachment; ++ ++struct drm_gem_object { ++ struct kref refcount; ++ unsigned int handle_count; ++ struct drm_device *dev; ++ struct file *filp; ++ struct drm_vma_offset_node vma_node; ++ size_t size; ++ int name; ++ struct dma_buf *dma_buf; ++ struct dma_buf_attachment *import_attach; ++}; ++ ++enum drm_connector_force { ++ DRM_FORCE_UNSPECIFIED = 0, ++ DRM_FORCE_OFF = 1, ++ DRM_FORCE_ON = 2, ++ DRM_FORCE_ON_DIGITAL = 3, ++}; ++ ++enum drm_connector_status { ++ connector_status_connected = 1, ++ connector_status_disconnected = 2, ++ connector_status_unknown = 3, ++}; ++ ++enum subpixel_order { ++ SubPixelUnknown = 0, ++ SubPixelHorizontalRGB = 1, ++ SubPixelHorizontalBGR = 2, ++ SubPixelVerticalRGB = 3, ++ SubPixelVerticalBGR = 4, ++ SubPixelNone = 5, ++}; ++ ++struct drm_scrambling { ++ bool supported; ++ bool low_rates; ++}; ++ ++struct drm_scdc { ++ bool supported; ++ bool read_request; ++ struct drm_scrambling scrambling; ++}; ++ ++struct drm_hdmi_info { ++ struct drm_scdc scdc; ++ long unsigned int y420_vdb_modes[2]; ++ long unsigned int y420_cmdb_modes[2]; ++ u64 y420_cmdb_map; ++ u8 y420_dc_modes; ++}; ++ ++enum drm_link_status { ++ DRM_LINK_STATUS_GOOD = 0, ++ DRM_LINK_STATUS_BAD = 1, ++}; ++ ++struct drm_display_info { ++ char name[32]; ++ unsigned int width_mm; ++ unsigned int height_mm; ++ unsigned int pixel_clock; ++ unsigned int bpc; ++ enum subpixel_order subpixel_order; ++ int panel_orientation; ++ u32 color_formats; ++ const u32 *bus_formats; ++ unsigned int num_bus_formats; ++ u32 bus_flags; ++ int max_tmds_clock; ++ bool dvi_dual; ++ bool has_hdmi_infoframe; ++ u8 edid_hdmi_dc_modes; ++ u8 cea_rev; ++ struct drm_hdmi_info hdmi; ++ bool non_desktop; ++}; ++ ++struct drm_tv_connector_state { ++ enum drm_mode_subconnector subconnector; ++ struct { ++ unsigned int left; ++ unsigned int right; ++ unsigned int top; ++ unsigned int bottom; ++ } margins; ++ unsigned int mode; ++ unsigned int brightness; ++ unsigned int contrast; ++ unsigned int flicker_reduction; ++ unsigned int overscan; ++ unsigned int saturation; ++ unsigned int hue; ++}; ++ ++struct drm_connector; ++ ++struct drm_crtc; ++ ++struct drm_encoder; ++ ++struct drm_crtc_commit; ++ ++struct drm_writeback_job; ++ ++struct drm_connector_state { ++ struct drm_connector *connector; ++ struct drm_crtc *crtc; ++ struct drm_encoder *best_encoder; ++ enum drm_link_status link_status; ++ struct drm_atomic_state *state; ++ struct drm_crtc_commit *commit; ++ struct drm_tv_connector_state tv; ++ enum hdmi_picture_aspect picture_aspect_ratio; ++ unsigned int content_type; ++ unsigned int scaling_mode; ++ unsigned int content_protection; ++ struct drm_writeback_job *writeback_job; ++}; ++ ++struct drm_cmdline_mode { ++ bool specified; ++ bool refresh_specified; ++ bool bpp_specified; ++ int xres; ++ int 
yres; ++ int bpp; ++ int refresh; ++ bool rb; ++ bool interlace; ++ bool cvt; ++ bool margins; ++ enum drm_connector_force force; ++}; ++ ++struct drm_connector_funcs; ++ ++struct drm_property_blob; ++ ++struct drm_connector_helper_funcs; ++ ++struct drm_tile_group; ++ ++struct drm_connector { ++ struct drm_device *dev; ++ struct device *kdev; ++ struct device_attribute *attr; ++ struct list_head head; ++ struct drm_mode_object base; ++ char *name; ++ struct mutex mutex; ++ unsigned int index; ++ int connector_type; ++ int connector_type_id; ++ bool interlace_allowed; ++ bool doublescan_allowed; ++ bool stereo_allowed; ++ bool ycbcr_420_allowed; ++ bool registered; ++ struct list_head modes; ++ enum drm_connector_status status; ++ struct list_head probed_modes; ++ struct drm_display_info display_info; ++ const struct drm_connector_funcs *funcs; ++ struct drm_property_blob *edid_blob_ptr; ++ struct drm_object_properties properties; ++ struct drm_property *scaling_mode_property; ++ struct drm_property *content_protection_property; ++ struct drm_property_blob *path_blob_ptr; ++ uint8_t polled; ++ int dpms; ++ const struct drm_connector_helper_funcs *helper_private; ++ struct drm_cmdline_mode cmdline_mode; ++ enum drm_connector_force force; ++ bool override_edid; ++ uint32_t encoder_ids[3]; ++ struct drm_encoder *encoder; ++ uint8_t eld[128]; ++ bool latency_present[2]; ++ int video_latency[2]; ++ int audio_latency[2]; ++ int null_edid_counter; ++ unsigned int bad_edid_counter; ++ bool edid_corrupt; ++ struct dentry *debugfs_entry; ++ struct drm_connector_state *state; ++ struct drm_property_blob *tile_blob_ptr; ++ bool has_tile; ++ struct drm_tile_group *tile_group; ++ bool tile_is_single_monitor; ++ uint8_t num_h_tile; ++ uint8_t num_v_tile; ++ uint8_t tile_h_loc; ++ uint8_t tile_v_loc; ++ uint16_t tile_h_size; ++ uint16_t tile_v_size; ++ struct llist_node free_node; ++}; ++ ++enum drm_mode_status { ++ MODE_OK = 0, ++ MODE_HSYNC = 1, ++ MODE_VSYNC = 2, ++ MODE_H_ILLEGAL = 3, ++ MODE_V_ILLEGAL = 4, ++ MODE_BAD_WIDTH = 5, ++ MODE_NOMODE = 6, ++ MODE_NO_INTERLACE = 7, ++ MODE_NO_DBLESCAN = 8, ++ MODE_NO_VSCAN = 9, ++ MODE_MEM = 10, ++ MODE_VIRTUAL_X = 11, ++ MODE_VIRTUAL_Y = 12, ++ MODE_MEM_VIRT = 13, ++ MODE_NOCLOCK = 14, ++ MODE_CLOCK_HIGH = 15, ++ MODE_CLOCK_LOW = 16, ++ MODE_CLOCK_RANGE = 17, ++ MODE_BAD_HVALUE = 18, ++ MODE_BAD_VVALUE = 19, ++ MODE_BAD_VSCAN = 20, ++ MODE_HSYNC_NARROW = 21, ++ MODE_HSYNC_WIDE = 22, ++ MODE_HBLANK_NARROW = 23, ++ MODE_HBLANK_WIDE = 24, ++ MODE_VSYNC_NARROW = 25, ++ MODE_VSYNC_WIDE = 26, ++ MODE_VBLANK_NARROW = 27, ++ MODE_VBLANK_WIDE = 28, ++ MODE_PANEL = 29, ++ MODE_INTERLACE_WIDTH = 30, ++ MODE_ONE_WIDTH = 31, ++ MODE_ONE_HEIGHT = 32, ++ MODE_ONE_SIZE = 33, ++ MODE_NO_REDUCED = 34, ++ MODE_NO_STEREO = 35, ++ MODE_NO_420 = 36, ++ MODE_STALE = 4294967293, ++ MODE_BAD = 4294967294, ++ MODE_ERROR = 4294967295, ++}; ++ ++struct drm_display_mode { ++ struct list_head head; ++ struct drm_mode_object base; ++ char name[32]; ++ enum drm_mode_status status; ++ unsigned int type; ++ int clock; ++ int hdisplay; ++ int hsync_start; ++ int hsync_end; ++ int htotal; ++ int hskew; ++ int vdisplay; ++ int vsync_start; ++ int vsync_end; ++ int vtotal; ++ int vscan; ++ unsigned int flags; ++ int width_mm; ++ int height_mm; ++ int crtc_clock; ++ int crtc_hdisplay; ++ int crtc_hblank_start; ++ int crtc_hblank_end; ++ int crtc_hsync_start; ++ int crtc_hsync_end; ++ int crtc_htotal; ++ int crtc_hskew; ++ int crtc_vdisplay; ++ int crtc_vblank_start; ++ int crtc_vblank_end; ++ 
int crtc_vsync_start; ++ int crtc_vsync_end; ++ int crtc_vtotal; ++ int *private; ++ int private_flags; ++ int vrefresh; ++ int hsync; ++ enum hdmi_picture_aspect picture_aspect_ratio; ++ struct list_head export_head; ++}; ++ ++struct drm_crtc_crc_entry; ++ ++struct drm_crtc_crc { ++ spinlock_t lock; ++ const char *source; ++ bool opened; ++ bool overflow; ++ struct drm_crtc_crc_entry *entries; ++ int head; ++ int tail; ++ size_t values_cnt; ++ wait_queue_head_t wq; ++}; ++ ++struct drm_plane; ++ ++struct drm_crtc_funcs; ++ ++struct drm_crtc_helper_funcs; ++ ++struct drm_crtc_state; ++ ++struct drm_crtc { ++ struct drm_device *dev; ++ struct device_node *port; ++ struct list_head head; ++ char *name; ++ struct drm_modeset_lock mutex; ++ struct drm_mode_object base; ++ struct drm_plane *primary; ++ struct drm_plane *cursor; ++ unsigned int index; ++ int cursor_x; ++ int cursor_y; ++ bool enabled; ++ struct drm_display_mode mode; ++ struct drm_display_mode hwmode; ++ int x; ++ int y; ++ const struct drm_crtc_funcs *funcs; ++ uint32_t gamma_size; ++ uint16_t *gamma_store; ++ const struct drm_crtc_helper_funcs *helper_private; ++ struct drm_object_properties properties; ++ struct drm_crtc_state *state; ++ struct list_head commit_list; ++ spinlock_t commit_lock; ++ struct dentry *debugfs_entry; ++ struct drm_crtc_crc crc; ++ unsigned int fence_context; ++ spinlock_t fence_lock; ++ long unsigned int fence_seqno; ++ char timeline_name[32]; ++}; ++ ++struct drm_bridge; ++ ++struct drm_encoder_funcs; ++ ++struct drm_encoder_helper_funcs; ++ ++struct drm_encoder { ++ struct drm_device *dev; ++ struct list_head head; ++ struct drm_mode_object base; ++ char *name; ++ int encoder_type; ++ unsigned int index; ++ uint32_t possible_crtcs; ++ uint32_t possible_clones; ++ struct drm_crtc *crtc; ++ struct drm_bridge *bridge; ++ const struct drm_encoder_funcs *funcs; ++ const struct drm_encoder_helper_funcs *helper_private; ++}; ++ ++struct __drm_planes_state; ++ ++struct __drm_crtcs_state; ++ ++struct __drm_connnectors_state; ++ ++struct __drm_private_objs_state; ++ ++struct drm_atomic_state { ++ struct kref ref; ++ struct drm_device *dev; ++ bool allow_modeset: 1; ++ bool legacy_cursor_update: 1; ++ bool async_update: 1; ++ struct __drm_planes_state *planes; ++ struct __drm_crtcs_state *crtcs; ++ int num_connector; ++ struct __drm_connnectors_state *connectors; ++ int num_private_objs; ++ struct __drm_private_objs_state *private_objs; ++ struct drm_modeset_acquire_ctx *acquire_ctx; ++ struct drm_crtc_commit *fake_commit; ++ struct work_struct commit_work; ++}; ++ ++struct drm_pending_vblank_event; ++ ++struct drm_crtc_commit { ++ struct drm_crtc *crtc; ++ struct kref ref; ++ struct completion flip_done; ++ struct completion hw_done; ++ struct completion cleanup_done; ++ struct list_head commit_entry; ++ struct drm_pending_vblank_event *event; ++ bool abort_completion; ++}; ++ ++struct drm_printer; ++ ++struct drm_connector_funcs { ++ int (*dpms)(struct drm_connector *, int); ++ void (*reset)(struct drm_connector *); ++ enum drm_connector_status (*detect)(struct drm_connector *, bool); ++ void (*force)(struct drm_connector *); ++ int (*fill_modes)(struct drm_connector *, uint32_t, uint32_t); ++ int (*set_property)(struct drm_connector *, struct drm_property *, uint64_t); ++ int (*late_register)(struct drm_connector *); ++ void (*early_unregister)(struct drm_connector *); ++ void (*destroy)(struct drm_connector *); ++ struct drm_connector_state * (*atomic_duplicate_state)(struct drm_connector *); ++ void 
(*atomic_destroy_state)(struct drm_connector *, struct drm_connector_state *); ++ int (*atomic_set_property)(struct drm_connector *, struct drm_connector_state *, struct drm_property *, uint64_t); ++ int (*atomic_get_property)(struct drm_connector *, const struct drm_connector_state *, struct drm_property *, uint64_t *); ++ void (*atomic_print_state)(struct drm_printer *, const struct drm_connector_state *); ++}; ++ ++struct drm_printer { ++ void (*printfn)(struct drm_printer *, struct va_format *); ++ void (*puts)(struct drm_printer *, const char *); ++ void *arg; ++ const char *prefix; ++}; ++ ++struct drm_property_blob { ++ struct drm_mode_object base; ++ struct drm_device *dev; ++ struct list_head head_global; ++ struct list_head head_file; ++ size_t length; ++ void *data; ++}; ++ ++struct drm_connector_helper_funcs { ++ int (*get_modes)(struct drm_connector *); ++ int (*detect_ctx)(struct drm_connector *, struct drm_modeset_acquire_ctx *, bool); ++ enum drm_mode_status (*mode_valid)(struct drm_connector *, struct drm_display_mode *); ++ struct drm_encoder * (*best_encoder)(struct drm_connector *); ++ struct drm_encoder * (*atomic_best_encoder)(struct drm_connector *, struct drm_connector_state *); ++ int (*atomic_check)(struct drm_connector *, struct drm_connector_state *); ++ void (*atomic_commit)(struct drm_connector *, struct drm_connector_state *); ++}; ++ ++struct drm_tile_group { ++ struct kref refcount; ++ struct drm_device *dev; ++ int id; ++ u8 group_data[8]; ++}; ++ ++struct drm_connector_list_iter { ++ struct drm_device *dev; ++ struct drm_connector *conn; ++}; ++ ++struct drm_bridge_funcs { ++ int (*attach)(struct drm_bridge *); ++ void (*detach)(struct drm_bridge *); ++ enum drm_mode_status (*mode_valid)(struct drm_bridge *, const struct drm_display_mode *); ++ bool (*mode_fixup)(struct drm_bridge *, const struct drm_display_mode *, struct drm_display_mode *); ++ void (*disable)(struct drm_bridge *); ++ void (*post_disable)(struct drm_bridge *); ++ void (*mode_set)(struct drm_bridge *, struct drm_display_mode *, struct drm_display_mode *); ++ void (*pre_enable)(struct drm_bridge *); ++ void (*enable)(struct drm_bridge *); ++}; ++ ++struct drm_bridge_timings; ++ ++struct drm_bridge { ++ struct drm_device *dev; ++ struct drm_encoder *encoder; ++ struct drm_bridge *next; ++ struct device_node *of_node; ++ struct list_head list; ++ const struct drm_bridge_timings *timings; ++ const struct drm_bridge_funcs *funcs; ++ void *driver_private; ++}; ++ ++struct drm_bridge_timings { ++ u32 sampling_edge; ++ u32 setup_time_ps; ++ u32 hold_time_ps; ++}; ++ ++enum drm_color_encoding { ++ DRM_COLOR_YCBCR_BT601 = 0, ++ DRM_COLOR_YCBCR_BT709 = 1, ++ DRM_COLOR_YCBCR_BT2020 = 2, ++ DRM_COLOR_ENCODING_MAX = 3, ++}; ++ ++enum drm_color_range { ++ DRM_COLOR_YCBCR_LIMITED_RANGE = 0, ++ DRM_COLOR_YCBCR_FULL_RANGE = 1, ++ DRM_COLOR_RANGE_MAX = 2, ++}; ++ ++struct drm_plane_state { ++ struct drm_plane *plane; ++ struct drm_crtc *crtc; ++ struct drm_framebuffer *fb; ++ struct dma_fence *fence; ++ int32_t crtc_x; ++ int32_t crtc_y; ++ uint32_t crtc_w; ++ uint32_t crtc_h; ++ uint32_t src_x; ++ uint32_t src_y; ++ uint32_t src_h; ++ uint32_t src_w; ++ u16 alpha; ++ unsigned int rotation; ++ unsigned int zpos; ++ unsigned int normalized_zpos; ++ enum drm_color_encoding color_encoding; ++ enum drm_color_range color_range; ++ struct drm_rect src; ++ struct drm_rect dst; ++ bool visible; ++ struct drm_crtc_commit *commit; ++ struct drm_atomic_state *state; ++}; ++ ++enum drm_plane_type { ++ 
DRM_PLANE_TYPE_OVERLAY = 0, ++ DRM_PLANE_TYPE_PRIMARY = 1, ++ DRM_PLANE_TYPE_CURSOR = 2, ++}; ++ ++struct drm_plane_funcs; ++ ++struct drm_plane_helper_funcs; ++ ++struct drm_plane { ++ struct drm_device *dev; ++ struct list_head head; ++ char *name; ++ struct drm_modeset_lock mutex; ++ struct drm_mode_object base; ++ uint32_t possible_crtcs; ++ uint32_t *format_types; ++ unsigned int format_count; ++ bool format_default; ++ uint64_t *modifiers; ++ unsigned int modifier_count; ++ struct drm_crtc *crtc; ++ struct drm_framebuffer *fb; ++ struct drm_framebuffer *old_fb; ++ const struct drm_plane_funcs *funcs; ++ struct drm_object_properties properties; ++ enum drm_plane_type type; ++ unsigned int index; ++ const struct drm_plane_helper_funcs *helper_private; ++ struct drm_plane_state *state; ++ struct drm_property *alpha_property; ++ struct drm_property *zpos_property; ++ struct drm_property *rotation_property; ++ struct drm_property *color_encoding_property; ++ struct drm_property *color_range_property; ++}; ++ ++struct drm_plane_funcs { ++ int (*update_plane)(struct drm_plane *, struct drm_crtc *, struct drm_framebuffer *, int, int, unsigned int, unsigned int, uint32_t, uint32_t, uint32_t, uint32_t, struct drm_modeset_acquire_ctx *); ++ int (*disable_plane)(struct drm_plane *, struct drm_modeset_acquire_ctx *); ++ void (*destroy)(struct drm_plane *); ++ void (*reset)(struct drm_plane *); ++ int (*set_property)(struct drm_plane *, struct drm_property *, uint64_t); ++ struct drm_plane_state * (*atomic_duplicate_state)(struct drm_plane *); ++ void (*atomic_destroy_state)(struct drm_plane *, struct drm_plane_state *); ++ int (*atomic_set_property)(struct drm_plane *, struct drm_plane_state *, struct drm_property *, uint64_t); ++ int (*atomic_get_property)(struct drm_plane *, const struct drm_plane_state *, struct drm_property *, uint64_t *); ++ int (*late_register)(struct drm_plane *); ++ void (*early_unregister)(struct drm_plane *); ++ void (*atomic_print_state)(struct drm_printer *, const struct drm_plane_state *); ++ bool (*format_mod_supported)(struct drm_plane *, uint32_t, uint64_t); ++}; ++ ++struct drm_plane_helper_funcs { ++ int (*prepare_fb)(struct drm_plane *, struct drm_plane_state *); ++ void (*cleanup_fb)(struct drm_plane *, struct drm_plane_state *); ++ int (*atomic_check)(struct drm_plane *, struct drm_plane_state *); ++ void (*atomic_update)(struct drm_plane *, struct drm_plane_state *); ++ void (*atomic_disable)(struct drm_plane *, struct drm_plane_state *); ++ int (*atomic_async_check)(struct drm_plane *, struct drm_plane_state *); ++ void (*atomic_async_update)(struct drm_plane *, struct drm_plane_state *); ++}; ++ ++struct drm_crtc_crc_entry { ++ bool has_frame_counter; ++ uint32_t frame; ++ uint32_t crcs[10]; ++}; ++ ++struct drm_mode_config_funcs { ++ struct drm_framebuffer * (*fb_create)(struct drm_device *, struct drm_file *, const struct drm_mode_fb_cmd2 *); ++ const struct drm_format_info * (*get_format_info)(const struct drm_mode_fb_cmd2 *); ++ void (*output_poll_changed)(struct drm_device *); ++ enum drm_mode_status (*mode_valid)(struct drm_device *, const struct drm_display_mode *); ++ int (*atomic_check)(struct drm_device *, struct drm_atomic_state *); ++ int (*atomic_commit)(struct drm_device *, struct drm_atomic_state *, bool); ++ struct drm_atomic_state * (*atomic_state_alloc)(struct drm_device *); ++ void (*atomic_state_clear)(struct drm_atomic_state *); ++ void (*atomic_state_free)(struct drm_atomic_state *); ++}; ++ ++struct drm_mode_config_helper_funcs { ++ 
void (*atomic_commit_tail)(struct drm_atomic_state *); ++}; ++ ++struct drm_crtc_state { ++ struct drm_crtc *crtc; ++ bool enable; ++ bool active; ++ bool planes_changed: 1; ++ bool mode_changed: 1; ++ bool active_changed: 1; ++ bool connectors_changed: 1; ++ bool zpos_changed: 1; ++ bool color_mgmt_changed: 1; ++ bool no_vblank: 1; ++ u32 plane_mask; ++ u32 connector_mask; ++ u32 encoder_mask; ++ struct drm_display_mode adjusted_mode; ++ struct drm_display_mode mode; ++ struct drm_property_blob *mode_blob; ++ struct drm_property_blob *degamma_lut; ++ struct drm_property_blob *ctm; ++ struct drm_property_blob *gamma_lut; ++ u32 target_vblank; ++ u32 pageflip_flags; ++ struct drm_pending_vblank_event *event; ++ struct drm_crtc_commit *commit; ++ struct drm_atomic_state *state; ++}; ++ ++struct drm_pending_event { ++ struct completion *completion; ++ void (*completion_release)(struct completion *); ++ struct drm_event *event; ++ struct dma_fence *fence; ++ struct drm_file *file_priv; ++ struct list_head link; ++ struct list_head pending_link; ++}; ++ ++struct drm_pending_vblank_event { ++ struct drm_pending_event base; ++ unsigned int pipe; ++ u64 sequence; ++ union { ++ struct drm_event base; ++ struct drm_event_vblank vbl; ++ struct drm_event_crtc_sequence seq; ++ } event; ++}; ++ ++struct drm_mode_set; ++ ++struct drm_crtc_funcs { ++ void (*reset)(struct drm_crtc *); ++ int (*cursor_set)(struct drm_crtc *, struct drm_file *, uint32_t, uint32_t, uint32_t); ++ int (*cursor_set2)(struct drm_crtc *, struct drm_file *, uint32_t, uint32_t, uint32_t, int32_t, int32_t); ++ int (*cursor_move)(struct drm_crtc *, int, int); ++ int (*gamma_set)(struct drm_crtc *, u16 *, u16 *, u16 *, uint32_t, struct drm_modeset_acquire_ctx *); ++ void (*destroy)(struct drm_crtc *); ++ int (*set_config)(struct drm_mode_set *, struct drm_modeset_acquire_ctx *); ++ int (*page_flip)(struct drm_crtc *, struct drm_framebuffer *, struct drm_pending_vblank_event *, uint32_t, struct drm_modeset_acquire_ctx *); ++ int (*page_flip_target)(struct drm_crtc *, struct drm_framebuffer *, struct drm_pending_vblank_event *, uint32_t, uint32_t, struct drm_modeset_acquire_ctx *); ++ int (*set_property)(struct drm_crtc *, struct drm_property *, uint64_t); ++ struct drm_crtc_state * (*atomic_duplicate_state)(struct drm_crtc *); ++ void (*atomic_destroy_state)(struct drm_crtc *, struct drm_crtc_state *); ++ int (*atomic_set_property)(struct drm_crtc *, struct drm_crtc_state *, struct drm_property *, uint64_t); ++ int (*atomic_get_property)(struct drm_crtc *, const struct drm_crtc_state *, struct drm_property *, uint64_t *); ++ int (*late_register)(struct drm_crtc *); ++ void (*early_unregister)(struct drm_crtc *); ++ int (*set_crc_source)(struct drm_crtc *, const char *, size_t *); ++ void (*atomic_print_state)(struct drm_printer *, const struct drm_crtc_state *); ++ u32 (*get_vblank_counter)(struct drm_crtc *); ++ int (*enable_vblank)(struct drm_crtc *); ++ void (*disable_vblank)(struct drm_crtc *); ++}; ++ ++struct drm_mode_set { ++ struct drm_framebuffer *fb; ++ struct drm_crtc *crtc; ++ struct drm_display_mode *mode; ++ uint32_t x; ++ uint32_t y; ++ struct drm_connector **connectors; ++ size_t num_connectors; ++}; ++ ++enum mode_set_atomic { ++ LEAVE_ATOMIC_MODE_SET = 0, ++ ENTER_ATOMIC_MODE_SET = 1, ++}; ++ ++struct drm_crtc_helper_funcs { ++ void (*dpms)(struct drm_crtc *, int); ++ void (*prepare)(struct drm_crtc *); ++ void (*commit)(struct drm_crtc *); ++ enum drm_mode_status (*mode_valid)(struct drm_crtc *, const struct 
drm_display_mode *); ++ bool (*mode_fixup)(struct drm_crtc *, const struct drm_display_mode *, struct drm_display_mode *); ++ int (*mode_set)(struct drm_crtc *, struct drm_display_mode *, struct drm_display_mode *, int, int, struct drm_framebuffer *); ++ void (*mode_set_nofb)(struct drm_crtc *); ++ int (*mode_set_base)(struct drm_crtc *, int, int, struct drm_framebuffer *); ++ int (*mode_set_base_atomic)(struct drm_crtc *, struct drm_framebuffer *, int, int, enum mode_set_atomic); ++ void (*disable)(struct drm_crtc *); ++ int (*atomic_check)(struct drm_crtc *, struct drm_crtc_state *); ++ void (*atomic_begin)(struct drm_crtc *, struct drm_crtc_state *); ++ void (*atomic_flush)(struct drm_crtc *, struct drm_crtc_state *); ++ void (*atomic_enable)(struct drm_crtc *, struct drm_crtc_state *); ++ void (*atomic_disable)(struct drm_crtc *, struct drm_crtc_state *); ++}; ++ ++struct reservation_object; ++ ++struct drm_ioctl_desc; ++ ++struct drm_driver { ++ int (*load)(struct drm_device *, long unsigned int); ++ int (*open)(struct drm_device *, struct drm_file *); ++ void (*postclose)(struct drm_device *, struct drm_file *); ++ void (*lastclose)(struct drm_device *); ++ void (*unload)(struct drm_device *); ++ void (*release)(struct drm_device *); ++ u32 (*get_vblank_counter)(struct drm_device *, unsigned int); ++ int (*enable_vblank)(struct drm_device *, unsigned int); ++ void (*disable_vblank)(struct drm_device *, unsigned int); ++ bool (*get_scanout_position)(struct drm_device *, unsigned int, bool, int *, int *, ktime_t *, ktime_t *, const struct drm_display_mode *); ++ bool (*get_vblank_timestamp)(struct drm_device *, unsigned int, int *, ktime_t *, bool); ++ irqreturn_t (*irq_handler)(int, void *); ++ void (*irq_preinstall)(struct drm_device *); ++ int (*irq_postinstall)(struct drm_device *); ++ void (*irq_uninstall)(struct drm_device *); ++ int (*master_create)(struct drm_device *, struct drm_master *); ++ void (*master_destroy)(struct drm_device *, struct drm_master *); ++ int (*master_set)(struct drm_device *, struct drm_file *, bool); ++ void (*master_drop)(struct drm_device *, struct drm_file *); ++ int (*debugfs_init)(struct drm_minor *); ++ void (*gem_free_object)(struct drm_gem_object *); ++ void (*gem_free_object_unlocked)(struct drm_gem_object *); ++ int (*gem_open_object)(struct drm_gem_object *, struct drm_file *); ++ void (*gem_close_object)(struct drm_gem_object *, struct drm_file *); ++ void (*gem_print_info)(struct drm_printer *, unsigned int, const struct drm_gem_object *); ++ struct drm_gem_object * (*gem_create_object)(struct drm_device *, size_t); ++ int (*prime_handle_to_fd)(struct drm_device *, struct drm_file *, uint32_t, uint32_t, int *); ++ int (*prime_fd_to_handle)(struct drm_device *, struct drm_file *, int, uint32_t *); ++ struct dma_buf * (*gem_prime_export)(struct drm_device *, struct drm_gem_object *, int); ++ struct drm_gem_object * (*gem_prime_import)(struct drm_device *, struct dma_buf *); ++ int (*gem_prime_pin)(struct drm_gem_object *); ++ void (*gem_prime_unpin)(struct drm_gem_object *); ++ struct reservation_object * (*gem_prime_res_obj)(struct drm_gem_object *); ++ struct sg_table * (*gem_prime_get_sg_table)(struct drm_gem_object *); ++ struct drm_gem_object * (*gem_prime_import_sg_table)(struct drm_device *, struct dma_buf_attachment *, struct sg_table *); ++ void * (*gem_prime_vmap)(struct drm_gem_object *); ++ void (*gem_prime_vunmap)(struct drm_gem_object *, void *); ++ int (*gem_prime_mmap)(struct drm_gem_object *, struct vm_area_struct *); ++ int 
(*dumb_create)(struct drm_file *, struct drm_device *, struct drm_mode_create_dumb *); ++ int (*dumb_map_offset)(struct drm_file *, struct drm_device *, uint32_t, uint64_t *); ++ int (*dumb_destroy)(struct drm_file *, struct drm_device *, uint32_t); ++ const struct vm_operations_struct *gem_vm_ops; ++ int major; ++ int minor; ++ int patchlevel; ++ char *name; ++ char *desc; ++ char *date; ++ u32 driver_features; ++ const struct drm_ioctl_desc *ioctls; ++ int num_ioctls; ++ const struct file_operations *fops; ++ struct list_head legacy_dev_list; ++ int (*firstopen)(struct drm_device *); ++ void (*preclose)(struct drm_device *, struct drm_file *); ++ int (*dma_ioctl)(struct drm_device *, void *, struct drm_file *); ++ int (*dma_quiescent)(struct drm_device *); ++ int (*context_dtor)(struct drm_device *, int); ++ int dev_priv_size; ++}; ++ ++struct drm_minor { ++ int index; ++ int type; ++ struct device *kdev; ++ struct drm_device *dev; ++ struct dentry *debugfs_root; ++ struct list_head debugfs_list; ++ struct mutex debugfs_lock; ++}; ++ ++struct drm_vblank_crtc { ++ struct drm_device *dev; ++ wait_queue_head_t queue; ++ struct timer_list disable_timer; ++ seqlock_t seqlock; ++ u64 count; ++ ktime_t time; ++ atomic_t refcount; ++ u32 last; ++ u32 max_vblank_count; ++ unsigned int inmodeset; ++ unsigned int pipe; ++ int framedur_ns; ++ int linedur_ns; ++ struct drm_display_mode hwmode; ++ bool enabled; ++}; ++ ++struct drm_client_funcs; ++ ++struct drm_client_dev { ++ struct drm_device *dev; ++ const char *name; ++ struct list_head list; ++ const struct drm_client_funcs *funcs; ++ struct drm_file *file; ++}; ++ ++struct drm_client_buffer; ++ ++struct drm_fb_helper_crtc; ++ ++struct drm_fb_helper_connector; ++ ++struct drm_fb_helper_funcs; ++ ++struct drm_fb_helper { ++ struct drm_client_dev client; ++ struct drm_client_buffer *buffer; ++ struct drm_framebuffer *fb; ++ struct drm_device *dev; ++ int crtc_count; ++ struct drm_fb_helper_crtc *crtc_info; ++ int connector_count; ++ int connector_info_alloc_count; ++ int sw_rotations; ++ struct drm_fb_helper_connector **connector_info; ++ const struct drm_fb_helper_funcs *funcs; ++ struct fb_info *fbdev; ++ u32 pseudo_palette[17]; ++ struct drm_clip_rect dirty_clip; ++ spinlock_t dirty_lock; ++ struct work_struct dirty_work; ++ struct work_struct resume_work; ++ struct mutex lock; ++ struct list_head kernel_fb_list; ++ bool delayed_hotplug; ++ bool deferred_setup; ++ int preferred_bpp; ++}; ++ ++enum drm_ioctl_flags { ++ DRM_AUTH = 1, ++ DRM_MASTER = 2, ++ DRM_ROOT_ONLY = 4, ++ DRM_UNLOCKED = 16, ++ DRM_RENDER_ALLOW = 32, ++}; ++ ++typedef int drm_ioctl_t(struct drm_device *, void *, struct drm_file *); ++ ++struct drm_ioctl_desc { ++ unsigned int cmd; ++ enum drm_ioctl_flags flags; ++ drm_ioctl_t *func; ++ const char *name; ++}; ++ ++struct __drm_planes_state { ++ struct drm_plane *ptr; ++ struct drm_plane_state *state; ++ struct drm_plane_state *old_state; ++ struct drm_plane_state *new_state; ++}; ++ ++struct __drm_crtcs_state { ++ struct drm_crtc *ptr; ++ struct drm_crtc_state *state; ++ struct drm_crtc_state *old_state; ++ struct drm_crtc_state *new_state; ++ struct drm_crtc_commit *commit; ++ s32 *out_fence_ptr; ++ u64 last_vblank_count; ++}; ++ ++struct __drm_connnectors_state { ++ struct drm_connector *ptr; ++ struct drm_connector_state *state; ++ struct drm_connector_state *old_state; ++ struct drm_connector_state *new_state; ++ s32 *out_fence_ptr; ++}; ++ ++struct drm_private_state; ++ ++struct drm_private_obj; ++ ++struct 
drm_private_state_funcs { ++ struct drm_private_state * (*atomic_duplicate_state)(struct drm_private_obj *); ++ void (*atomic_destroy_state)(struct drm_private_obj *, struct drm_private_state *); ++}; ++ ++struct drm_private_state { ++ struct drm_atomic_state *state; ++}; ++ ++struct drm_private_obj { ++ struct drm_private_state *state; ++ const struct drm_private_state_funcs *funcs; ++}; ++ ++struct __drm_private_objs_state { ++ struct drm_private_obj *ptr; ++ struct drm_private_state *state; ++ struct drm_private_state *old_state; ++ struct drm_private_state *new_state; ++}; ++ ++struct drm_encoder_funcs { ++ void (*reset)(struct drm_encoder *); ++ void (*destroy)(struct drm_encoder *); ++ int (*late_register)(struct drm_encoder *); ++ void (*early_unregister)(struct drm_encoder *); ++}; ++ ++struct drm_encoder_helper_funcs { ++ void (*dpms)(struct drm_encoder *, int); ++ enum drm_mode_status (*mode_valid)(struct drm_encoder *, const struct drm_display_mode *); ++ bool (*mode_fixup)(struct drm_encoder *, const struct drm_display_mode *, struct drm_display_mode *); ++ void (*prepare)(struct drm_encoder *); ++ void (*commit)(struct drm_encoder *); ++ void (*mode_set)(struct drm_encoder *, struct drm_display_mode *, struct drm_display_mode *); ++ void (*atomic_mode_set)(struct drm_encoder *, struct drm_crtc_state *, struct drm_connector_state *); ++ struct drm_crtc * (*get_crtc)(struct drm_encoder *); ++ enum drm_connector_status (*detect)(struct drm_encoder *, struct drm_connector *); ++ void (*disable)(struct drm_encoder *); ++ void (*enable)(struct drm_encoder *); ++ int (*atomic_check)(struct drm_encoder *, struct drm_crtc_state *, struct drm_connector_state *); ++}; ++ ++struct drm_client_funcs { ++ struct module *owner; ++ void (*unregister)(struct drm_client_dev *); ++ int (*restore)(struct drm_client_dev *); ++ int (*hotplug)(struct drm_client_dev *); ++}; ++ ++struct drm_client_buffer { ++ struct drm_client_dev *client; ++ u32 handle; ++ u32 pitch; ++ struct drm_gem_object *gem; ++ void *vaddr; ++ struct drm_framebuffer *fb; ++}; ++ ++struct drm_fb_offset { ++ int x; ++ int y; ++}; ++ ++struct drm_fb_helper_crtc { ++ struct drm_mode_set mode_set; ++ struct drm_display_mode *desired_mode; ++ int x; ++ int y; ++ int rotation; ++}; ++ ++struct drm_fb_helper_surface_size { ++ u32 fb_width; ++ u32 fb_height; ++ u32 surface_width; ++ u32 surface_height; ++ u32 surface_bpp; ++ u32 surface_depth; ++}; ++ ++struct drm_fb_helper_funcs { ++ int (*fb_probe)(struct drm_fb_helper *, struct drm_fb_helper_surface_size *); ++ bool (*initial_config)(struct drm_fb_helper *, struct drm_fb_helper_crtc **, struct drm_display_mode **, struct drm_fb_offset *, bool *, int, int); ++}; ++ ++struct drm_fb_helper_connector { ++ struct drm_connector *connector; ++}; ++ ++struct i2c_msg { ++ __u16 addr; ++ __u16 flags; ++ __u16 len; ++ __u8 *buf; ++}; ++ ++union i2c_smbus_data { ++ __u8 byte; ++ __u16 word; ++ __u8 block[34]; ++}; ++ ++enum i2c_slave_event { ++ I2C_SLAVE_READ_REQUESTED = 0, ++ I2C_SLAVE_WRITE_REQUESTED = 1, ++ I2C_SLAVE_READ_PROCESSED = 2, ++ I2C_SLAVE_WRITE_RECEIVED = 3, ++ I2C_SLAVE_STOP = 4, ++}; ++ ++struct i2c_client; ++ ++typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); ++ ++struct i2c_adapter; ++ ++struct i2c_client { ++ short unsigned int flags; ++ short unsigned int addr; ++ char name[20]; ++ struct i2c_adapter *adapter; ++ struct device dev; ++ int init_irq; ++ int irq; ++ struct list_head detected; ++ i2c_slave_cb_t slave_cb; ++}; ++ ++struct 
i2c_algorithm; ++ ++struct i2c_lock_operations; ++ ++struct i2c_bus_recovery_info; ++ ++struct i2c_adapter_quirks; ++ ++struct i2c_adapter { ++ struct module *owner; ++ unsigned int class; ++ const struct i2c_algorithm *algo; ++ void *algo_data; ++ const struct i2c_lock_operations *lock_ops; ++ struct rt_mutex bus_lock; ++ struct rt_mutex mux_lock; ++ int timeout; ++ int retries; ++ struct device dev; ++ int nr; ++ char name[48]; ++ struct completion dev_released; ++ struct mutex userspace_clients_lock; ++ struct list_head userspace_clients; ++ struct i2c_bus_recovery_info *bus_recovery_info; ++ const struct i2c_adapter_quirks *quirks; ++ struct irq_domain *host_notify_domain; ++}; ++ ++struct i2c_algorithm { ++ int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); ++ int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); ++ u32 (*functionality)(struct i2c_adapter *); ++ int (*reg_slave)(struct i2c_client *); ++ int (*unreg_slave)(struct i2c_client *); ++}; ++ ++struct i2c_lock_operations { ++ void (*lock_bus)(struct i2c_adapter *, unsigned int); ++ int (*trylock_bus)(struct i2c_adapter *, unsigned int); ++ void (*unlock_bus)(struct i2c_adapter *, unsigned int); ++}; ++ ++struct i2c_bus_recovery_info { ++ int (*recover_bus)(struct i2c_adapter *); ++ int (*get_scl)(struct i2c_adapter *); ++ void (*set_scl)(struct i2c_adapter *, int); ++ int (*get_sda)(struct i2c_adapter *); ++ void (*set_sda)(struct i2c_adapter *, int); ++ int (*get_bus_free)(struct i2c_adapter *); ++ void (*prepare_recovery)(struct i2c_adapter *); ++ void (*unprepare_recovery)(struct i2c_adapter *); ++ struct gpio_desc___2 *scl_gpiod; ++ struct gpio_desc___2 *sda_gpiod; ++}; ++ ++struct i2c_adapter_quirks { ++ u64 flags; ++ int max_num_msgs; ++ u16 max_write_len; ++ u16 max_read_len; ++ u16 max_comb_1st_msg_len; ++ u16 max_comb_2nd_msg_len; ++}; ++ ++struct drm_dp_aux_msg { ++ unsigned int address; ++ u8 request; ++ u8 reply; ++ void *buffer; ++ size_t size; ++}; ++ ++struct cec_adapter; ++ ++struct drm_dp_aux_cec { ++ struct mutex lock; ++ struct cec_adapter *adap; ++ const char *name; ++ struct device *parent; ++ struct delayed_work unregister_work; ++}; ++ ++struct drm_dp_aux { ++ const char *name; ++ struct i2c_adapter ddc; ++ struct device *dev; ++ struct drm_crtc *crtc; ++ struct mutex hw_mutex; ++ struct work_struct crc_work; ++ u8 crc_count; ++ ssize_t (*transfer)(struct drm_dp_aux *, struct drm_dp_aux_msg *); ++ unsigned int i2c_nack_count; ++ unsigned int i2c_defer_count; ++ struct drm_dp_aux_cec cec; ++}; ++ ++struct drm_dp_link { ++ unsigned char revision; ++ unsigned int rate; ++ unsigned int num_lanes; ++ long unsigned int capabilities; ++}; ++ ++struct drm_dp_dpcd_ident { ++ u8 oui[3]; ++ u8 device_id[6]; ++ u8 hw_rev; ++ u8 sw_major_rev; ++ u8 sw_minor_rev; ++}; ++ ++struct drm_dp_desc { ++ struct drm_dp_dpcd_ident ident; ++ u32 quirks; ++}; ++ ++enum drm_dp_quirk { ++ DP_DPCD_QUIRK_LIMITED_M_N = 0, ++}; ++ ++struct dpcd_quirk { ++ u8 oui[3]; ++ bool is_branch; ++ u32 quirks; ++}; ++ ++struct est_timings { ++ u8 t1; ++ u8 t2; ++ u8 mfg_rsvd; ++}; ++ ++struct std_timing { ++ u8 hsize; ++ u8 vfreq_aspect; ++}; ++ ++struct detailed_pixel_timing { ++ u8 hactive_lo; ++ u8 hblank_lo; ++ u8 hactive_hblank_hi; ++ u8 vactive_lo; ++ u8 vblank_lo; ++ u8 vactive_vblank_hi; ++ u8 hsync_offset_lo; ++ u8 hsync_pulse_width_lo; ++ u8 vsync_offset_pulse_width_lo; ++ u8 hsync_vsync_offset_pulse_width_hi; ++ u8 width_mm_lo; ++ u8 height_mm_lo; ++ u8 
width_height_mm_hi; ++ u8 hborder; ++ u8 vborder; ++ u8 misc; ++}; ++ ++struct detailed_data_string { ++ u8 str[13]; ++}; ++ ++struct detailed_data_monitor_range { ++ u8 min_vfreq; ++ u8 max_vfreq; ++ u8 min_hfreq_khz; ++ u8 max_hfreq_khz; ++ u8 pixel_clock_mhz; ++ u8 flags; ++ union { ++ struct { ++ u8 reserved; ++ u8 hfreq_start_khz; ++ u8 c; ++ __le16 m; ++ u8 k; ++ u8 j; ++ } __attribute__((packed)) gtf2; ++ struct { ++ u8 version; ++ u8 data1; ++ u8 data2; ++ u8 supported_aspects; ++ u8 flags; ++ u8 supported_scalings; ++ u8 preferred_refresh; ++ } cvt; ++ } formula; ++} __attribute__((packed)); ++ ++struct detailed_data_wpindex { ++ u8 white_yx_lo; ++ u8 white_x_hi; ++ u8 white_y_hi; ++ u8 gamma; ++}; ++ ++struct cvt_timing { ++ u8 code[3]; ++}; ++ ++struct detailed_non_pixel { ++ u8 pad1; ++ u8 type; ++ u8 pad2; ++ union { ++ struct detailed_data_string str; ++ struct detailed_data_monitor_range range; ++ struct detailed_data_wpindex color; ++ struct std_timing timings[6]; ++ struct cvt_timing cvt[4]; ++ } data; ++} __attribute__((packed)); ++ ++struct detailed_timing { ++ __le16 pixel_clock; ++ union { ++ struct detailed_pixel_timing pixel_data; ++ struct detailed_non_pixel other_data; ++ } data; ++}; ++ ++struct edid { ++ u8 header[8]; ++ u8 mfg_id[2]; ++ u8 prod_code[2]; ++ u32 serial; ++ u8 mfg_week; ++ u8 mfg_year; ++ u8 version; ++ u8 revision; ++ u8 input; ++ u8 width_cm; ++ u8 height_cm; ++ u8 gamma; ++ u8 features; ++ u8 red_green_lo; ++ u8 black_white_lo; ++ u8 red_x; ++ u8 red_y; ++ u8 green_x; ++ u8 green_y; ++ u8 blue_x; ++ u8 blue_y; ++ u8 white_x; ++ u8 white_y; ++ struct est_timings established_timings; ++ struct std_timing standard_timings[8]; ++ struct detailed_timing detailed_timings[4]; ++ u8 extensions; ++ u8 checksum; ++}; ++ ++struct drm_dp_vcpi { ++ int vcpi; ++ int pbn; ++ int aligned_pbn; ++ int num_slots; ++}; ++ ++struct drm_dp_mst_branch; ++ ++struct drm_dp_mst_topology_mgr; ++ ++struct drm_dp_mst_port { ++ struct kref kref; ++ u8 port_num; ++ bool input; ++ bool mcs; ++ bool ddps; ++ u8 pdt; ++ bool ldps; ++ u8 dpcd_rev; ++ u8 num_sdp_streams; ++ u8 num_sdp_stream_sinks; ++ uint16_t available_pbn; ++ struct list_head next; ++ struct drm_dp_mst_branch *mstb; ++ struct drm_dp_aux aux; ++ struct drm_dp_mst_branch *parent; ++ struct drm_dp_vcpi vcpi; ++ struct drm_connector *connector; ++ struct drm_dp_mst_topology_mgr *mgr; ++ struct edid *cached_edid; ++ bool has_audio; ++}; ++ ++struct drm_dp_sideband_msg_tx; ++ ++struct drm_dp_mst_branch { ++ struct kref kref; ++ u8 rad[8]; ++ u8 lct; ++ int num_ports; ++ int msg_slots; ++ struct list_head ports; ++ struct drm_dp_mst_port *port_parent; ++ struct drm_dp_mst_topology_mgr *mgr; ++ struct drm_dp_sideband_msg_tx *tx_slots[2]; ++ int last_seqno; ++ bool link_address_sent; ++ u8 guid[16]; ++}; ++ ++struct drm_dp_sideband_msg_hdr { ++ u8 lct; ++ u8 lcr; ++ u8 rad[8]; ++ bool broadcast; ++ bool path_msg; ++ u8 msg_len; ++ bool somt; ++ bool eomt; ++ bool seqno; ++}; ++ ++struct drm_dp_sideband_msg_rx { ++ u8 chunk[48]; ++ u8 msg[256]; ++ u8 curchunk_len; ++ u8 curchunk_idx; ++ u8 curchunk_hdrlen; ++ u8 curlen; ++ bool have_somt; ++ bool have_eomt; ++ struct drm_dp_sideband_msg_hdr initial_hdr; ++}; ++ ++struct drm_dp_mst_topology_cbs; ++ ++struct drm_dp_mst_topology_state; ++ ++struct drm_dp_payload; ++ ++struct drm_dp_mst_topology_mgr { ++ struct drm_private_obj base; ++ struct drm_device *dev; ++ const struct drm_dp_mst_topology_cbs *cbs; ++ int max_dpcd_transaction_bytes; ++ struct drm_dp_aux *aux; ++ int 
max_payloads; ++ int conn_base_id; ++ struct drm_dp_sideband_msg_rx down_rep_recv; ++ struct drm_dp_sideband_msg_rx up_req_recv; ++ struct mutex lock; ++ bool mst_state; ++ struct drm_dp_mst_branch *mst_primary; ++ u8 dpcd[15]; ++ u8 sink_count; ++ int pbn_div; ++ struct drm_dp_mst_topology_state *state; ++ const struct drm_private_state_funcs *funcs; ++ struct mutex qlock; ++ struct list_head tx_msg_downq; ++ struct mutex payload_lock; ++ struct drm_dp_vcpi **proposed_vcpis; ++ struct drm_dp_payload *payloads; ++ long unsigned int payload_mask; ++ long unsigned int vcpi_mask; ++ wait_queue_head_t tx_waitq; ++ struct work_struct work; ++ struct work_struct tx_work; ++ struct list_head destroy_connector_list; ++ struct mutex destroy_connector_lock; ++ struct work_struct destroy_connector_work; ++}; ++ ++struct drm_dp_nak_reply { ++ u8 guid[16]; ++ u8 reason; ++ u8 nak_data; ++}; ++ ++struct drm_dp_link_addr_reply_port { ++ bool input_port; ++ u8 peer_device_type; ++ u8 port_number; ++ bool mcs; ++ bool ddps; ++ bool legacy_device_plug_status; ++ u8 dpcd_revision; ++ u8 peer_guid[16]; ++ u8 num_sdp_streams; ++ u8 num_sdp_stream_sinks; ++}; ++ ++struct drm_dp_link_address_ack_reply { ++ u8 guid[16]; ++ u8 nports; ++ struct drm_dp_link_addr_reply_port ports[16]; ++}; ++ ++struct drm_dp_port_number_rep { ++ u8 port_number; ++}; ++ ++struct drm_dp_enum_path_resources_ack_reply { ++ u8 port_number; ++ u16 full_payload_bw_number; ++ u16 avail_payload_bw_number; ++}; ++ ++struct drm_dp_allocate_payload_ack_reply { ++ u8 port_number; ++ u8 vcpi; ++ u16 allocated_pbn; ++}; ++ ++struct drm_dp_query_payload_ack_reply { ++ u8 port_number; ++ u16 allocated_pbn; ++}; ++ ++struct drm_dp_remote_dpcd_read_ack_reply { ++ u8 port_number; ++ u8 num_bytes; ++ u8 bytes[255]; ++}; ++ ++struct drm_dp_remote_dpcd_write_ack_reply { ++ u8 port_number; ++}; ++ ++struct drm_dp_remote_dpcd_write_nak_reply { ++ u8 port_number; ++ u8 reason; ++ u8 bytes_written_before_failure; ++}; ++ ++struct drm_dp_remote_i2c_read_ack_reply { ++ u8 port_number; ++ u8 num_bytes; ++ u8 bytes[255]; ++}; ++ ++struct drm_dp_remote_i2c_read_nak_reply { ++ u8 port_number; ++ u8 nak_reason; ++ u8 i2c_nak_transaction; ++}; ++ ++struct drm_dp_remote_i2c_write_ack_reply { ++ u8 port_number; ++}; ++ ++union ack_replies { ++ struct drm_dp_nak_reply nak; ++ struct drm_dp_link_address_ack_reply link_addr; ++ struct drm_dp_port_number_rep port_number; ++ struct drm_dp_enum_path_resources_ack_reply path_resources; ++ struct drm_dp_allocate_payload_ack_reply allocate_payload; ++ struct drm_dp_query_payload_ack_reply query_payload; ++ struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; ++ struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; ++ struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; ++ struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; ++ struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; ++ struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; ++}; ++ ++struct drm_dp_sideband_msg_reply_body { ++ u8 reply_type; ++ u8 req_type; ++ union ack_replies u; ++}; ++ ++struct drm_dp_sideband_msg_tx { ++ u8 msg[256]; ++ u8 chunk[48]; ++ u8 cur_offset; ++ u8 cur_len; ++ struct drm_dp_mst_branch *dst; ++ struct list_head next; ++ int seqno; ++ int state; ++ bool path_msg; ++ struct drm_dp_sideband_msg_reply_body reply; ++}; ++ ++struct drm_dp_allocate_payload { ++ u8 port_number; ++ u8 number_sdp_streams; ++ u8 vcpi; ++ u16 pbn; ++ u8 sdp_stream_sink[16]; ++}; ++ ++struct 
drm_dp_connection_status_notify { ++ u8 guid[16]; ++ u8 port_number; ++ bool legacy_device_plug_status; ++ bool displayport_device_plug_status; ++ bool message_capability_status; ++ bool input_port; ++ u8 peer_device_type; ++}; ++ ++struct drm_dp_remote_dpcd_read { ++ u8 port_number; ++ u32 dpcd_address; ++ u8 num_bytes; ++}; ++ ++struct drm_dp_remote_dpcd_write { ++ u8 port_number; ++ u32 dpcd_address; ++ u8 num_bytes; ++ u8 *bytes; ++}; ++ ++struct drm_dp_remote_i2c_read { ++ u8 num_transactions; ++ u8 port_number; ++ struct { ++ u8 i2c_dev_id; ++ u8 num_bytes; ++ u8 *bytes; ++ u8 no_stop_bit; ++ u8 i2c_transaction_delay; ++ } transactions[4]; ++ u8 read_i2c_device_id; ++ u8 num_bytes_read; ++}; ++ ++struct drm_dp_remote_i2c_write { ++ u8 port_number; ++ u8 write_i2c_device_id; ++ u8 num_bytes; ++ u8 *bytes; ++}; ++ ++struct drm_dp_port_number_req { ++ u8 port_number; ++}; ++ ++struct drm_dp_query_payload { ++ u8 port_number; ++ u8 vcpi; ++}; ++ ++struct drm_dp_resource_status_notify { ++ u8 port_number; ++ u8 guid[16]; ++ u16 available_pbn; ++}; ++ ++union ack_req { ++ struct drm_dp_connection_status_notify conn_stat; ++ struct drm_dp_port_number_req port_num; ++ struct drm_dp_resource_status_notify resource_stat; ++ struct drm_dp_query_payload query_payload; ++ struct drm_dp_allocate_payload allocate_payload; ++ struct drm_dp_remote_dpcd_read dpcd_read; ++ struct drm_dp_remote_dpcd_write dpcd_write; ++ struct drm_dp_remote_i2c_read i2c_read; ++ struct drm_dp_remote_i2c_write i2c_write; ++}; ++ ++struct drm_dp_sideband_msg_req_body { ++ u8 req_type; ++ union ack_req u; ++}; ++ ++struct drm_dp_mst_topology_cbs { ++ struct drm_connector * (*add_connector)(struct drm_dp_mst_topology_mgr *, struct drm_dp_mst_port *, const char *); ++ void (*register_connector)(struct drm_connector *); ++ void (*destroy_connector)(struct drm_dp_mst_topology_mgr *, struct drm_connector *); ++ void (*hotplug)(struct drm_dp_mst_topology_mgr *); ++}; ++ ++struct drm_dp_payload { ++ int payload_state; ++ int start_slot; ++ int num_slots; ++ int vcpi; ++}; ++ ++struct drm_dp_mst_topology_state { ++ struct drm_private_state base; ++ int avail_slots; ++ struct drm_atomic_state *state; ++ struct drm_dp_mst_topology_mgr *mgr; ++}; ++ ++struct drm_color_lut { ++ __u16 red; ++ __u16 green; ++ __u16 blue; ++ __u16 reserved; ++}; ++ ++struct drm_writeback_job { ++ struct work_struct cleanup_work; ++ struct list_head list_entry; ++ struct drm_framebuffer *fb; ++ struct dma_fence *out_fence; ++}; ++ ++enum drm_lspcon_mode { ++ DRM_LSPCON_MODE_INVALID = 0, ++ DRM_LSPCON_MODE_LS = 1, ++ DRM_LSPCON_MODE_PCON = 2, ++}; ++ ++enum drm_dp_dual_mode_type { ++ DRM_DP_DUAL_MODE_NONE = 0, ++ DRM_DP_DUAL_MODE_UNKNOWN = 1, ++ DRM_DP_DUAL_MODE_TYPE1_DVI = 2, ++ DRM_DP_DUAL_MODE_TYPE1_HDMI = 3, ++ DRM_DP_DUAL_MODE_TYPE2_DVI = 4, ++ DRM_DP_DUAL_MODE_TYPE2_HDMI = 5, ++ DRM_DP_DUAL_MODE_LSPCON = 6, ++}; ++ ++struct drm_simple_display_pipe; ++ ++struct drm_simple_display_pipe_funcs { ++ enum drm_mode_status (*mode_valid)(struct drm_crtc *, const struct drm_display_mode *); ++ void (*enable)(struct drm_simple_display_pipe *, struct drm_crtc_state *, struct drm_plane_state *); ++ void (*disable)(struct drm_simple_display_pipe *); ++ int (*check)(struct drm_simple_display_pipe *, struct drm_plane_state *, struct drm_crtc_state *); ++ void (*update)(struct drm_simple_display_pipe *, struct drm_plane_state *); ++ int (*prepare_fb)(struct drm_simple_display_pipe *, struct drm_plane_state *); ++ void (*cleanup_fb)(struct drm_simple_display_pipe *, 
struct drm_plane_state *); ++ int (*enable_vblank)(struct drm_simple_display_pipe *); ++ void (*disable_vblank)(struct drm_simple_display_pipe *); ++}; ++ ++struct drm_simple_display_pipe { ++ struct drm_crtc crtc; ++ struct drm_plane plane; ++ struct drm_encoder encoder; ++ struct drm_connector *connector; ++ const struct drm_simple_display_pipe_funcs *funcs; ++}; ++ ++struct dma_fence_cb; ++ ++typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); ++ ++struct dma_fence_cb { ++ struct list_head node; ++ dma_fence_func_t func; ++}; ++ ++struct dma_buf_ops { ++ int (*attach)(struct dma_buf *, struct dma_buf_attachment *); ++ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); ++ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); ++ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); ++ void (*release)(struct dma_buf *); ++ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); ++ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); ++ void * (*map)(struct dma_buf *, long unsigned int); ++ void (*unmap)(struct dma_buf *, long unsigned int, void *); ++ int (*mmap)(struct dma_buf *, struct vm_area_struct *); ++ void * (*vmap)(struct dma_buf *); ++ void (*vunmap)(struct dma_buf *, void *); ++}; ++ ++struct dma_buf_poll_cb_t { ++ struct dma_fence_cb cb; ++ wait_queue_head_t *poll; ++ __poll_t active; ++}; ++ ++struct dma_buf { ++ size_t size; ++ struct file *file; ++ struct list_head attachments; ++ const struct dma_buf_ops *ops; ++ struct mutex lock; ++ unsigned int vmapping_counter; ++ void *vmap_ptr; ++ const char *exp_name; ++ struct module *owner; ++ struct list_head list_node; ++ void *priv; ++ struct reservation_object *resv; ++ wait_queue_head_t poll; ++ struct dma_buf_poll_cb_t cb_excl; ++ struct dma_buf_poll_cb_t cb_shared; ++}; ++ ++struct dma_buf_attachment { ++ struct dma_buf *dmabuf; ++ struct device *dev; ++ struct list_head node; ++ void *priv; ++}; ++ ++struct reservation_object_list; ++ ++struct reservation_object { ++ struct ww_mutex lock; ++ seqcount_t seq; ++ struct dma_fence *fence_excl; ++ struct reservation_object_list *fence; ++ struct reservation_object_list *staged; ++}; ++ ++struct ww_class { ++ atomic_long_t stamp; ++ struct lock_class_key acquire_key; ++ struct lock_class_key mutex_key; ++ const char *acquire_name; ++ const char *mutex_name; ++ unsigned int is_wait_die; ++}; ++ ++struct reservation_object_list { ++ struct callback_head rcu; ++ u32 shared_count; ++ u32 shared_max; ++ struct dma_fence *shared[0]; ++}; ++ ++struct drm_mm { ++ void (*color_adjust)(const struct drm_mm_node *, long unsigned int, u64 *, u64 *); ++ struct list_head hole_stack; ++ struct drm_mm_node head_node; ++ struct rb_root_cached interval_tree; ++ struct rb_root_cached holes_size; ++ struct rb_root holes_addr; ++ long unsigned int scan_active; ++}; ++ ++struct drm_vma_offset_manager { ++ rwlock_t vm_lock; ++ struct drm_mm vm_addr_space_mm; ++}; ++ ++struct display_timing___2; ++ ++struct drm_panel; ++ ++struct drm_panel_funcs { ++ int (*disable)(struct drm_panel *); ++ int (*unprepare)(struct drm_panel *); ++ int (*prepare)(struct drm_panel *); ++ int (*enable)(struct drm_panel *); ++ int (*get_modes)(struct drm_panel *); ++ int (*get_timings)(struct drm_panel *, unsigned int, struct display_timing___2 *); ++}; ++ ++struct drm_panel { ++ struct drm_device *drm; ++ struct drm_connector *connector; ++ struct device *dev; ++ const struct 
drm_panel_funcs *funcs; ++ struct list_head list; ++}; ++ ++struct panel_bridge { ++ struct drm_bridge bridge; ++ struct drm_connector connector; ++ struct drm_panel *panel; ++ u32 connector_type; ++}; ++ ++struct drm_dp_aux_dev { ++ unsigned int index; ++ struct drm_dp_aux *aux; ++ struct device *dev; ++ struct kref refcount; ++ atomic_t usecount; ++}; ++ ++enum drm_map_type { ++ _DRM_FRAME_BUFFER = 0, ++ _DRM_REGISTERS = 1, ++ _DRM_SHM = 2, ++ _DRM_AGP = 3, ++ _DRM_SCATTER_GATHER = 4, ++ _DRM_CONSISTENT = 5, ++}; ++ ++enum drm_map_flags { ++ _DRM_RESTRICTED = 1, ++ _DRM_READ_ONLY = 2, ++ _DRM_LOCKED = 4, ++ _DRM_KERNEL = 8, ++ _DRM_WRITE_COMBINING = 16, ++ _DRM_CONTAINS_LOCK = 32, ++ _DRM_REMOVABLE = 64, ++ _DRM_DRIVER = 128, ++}; ++ ++struct drm_auth { ++ drm_magic_t magic; ++}; ++ ++struct drm_lock_data { ++ struct drm_hw_lock *hw_lock; ++ struct drm_file *file_priv; ++ wait_queue_head_t lock_queue; ++ long unsigned int lock_time; ++ spinlock_t spinlock; ++ uint32_t kernel_waiters; ++ uint32_t user_waiters; ++ int idle_has_lock; ++}; ++ ++struct drm_master { ++ struct kref refcount; ++ struct drm_device *dev; ++ char *unique; ++ int unique_len; ++ struct idr magic_map; ++ struct drm_lock_data lock; ++ void *driver_priv; ++ struct drm_master *lessor; ++ int lessee_id; ++ struct list_head lessee_list; ++ struct list_head lessees; ++ struct idr leases; ++ struct idr lessee_idr; ++}; ++ ++struct drm_buf; ++ ++struct drm_dma_handle; ++ ++struct drm_buf_entry { ++ int buf_size; ++ int buf_count; ++ struct drm_buf *buflist; ++ int seg_count; ++ int page_order; ++ struct drm_dma_handle **seglist; ++ int low_mark; ++ int high_mark; ++}; ++ ++struct drm_device_dma { ++ struct drm_buf_entry bufs[23]; ++ int buf_count; ++ struct drm_buf **buflist; ++ int seg_count; ++ int page_count; ++ long unsigned int *pagelist; ++ long unsigned int byte_count; ++ enum { ++ _DRM_DMA_USE_AGP = 1, ++ _DRM_DMA_USE_SG = 2, ++ _DRM_DMA_USE_FB = 4, ++ _DRM_DMA_USE_PCI_RO = 8, ++ } flags; ++}; ++ ++struct drm_sg_mem { ++ long unsigned int handle; ++ void *virtual; ++ int pages; ++ struct page **pagelist; ++ dma_addr_t *busaddr; ++}; ++ ++struct drm_local_map { ++ resource_size_t offset; ++ long unsigned int size; ++ enum drm_map_type type; ++ enum drm_map_flags flags; ++ void *handle; ++ int mtrr; ++}; ++ ++struct drm_buf { ++ int idx; ++ int total; ++ int order; ++ int used; ++ long unsigned int offset; ++ void *address; ++ long unsigned int bus_address; ++ struct drm_buf *next; ++ volatile int waiting; ++ volatile int pending; ++ struct drm_file *file_priv; ++ int context; ++ int while_locked; ++ enum { ++ DRM_LIST_NONE = 0, ++ DRM_LIST_FREE = 1, ++ DRM_LIST_WAIT = 2, ++ DRM_LIST_PEND = 3, ++ DRM_LIST_PRIO = 4, ++ DRM_LIST_RECLAIM = 5, ++ } list; ++ int dev_priv_size; ++ void *dev_private; ++}; ++ ++struct drm_dma_handle { ++ dma_addr_t busaddr; ++ void *vaddr; ++ size_t size; ++}; ++ ++struct agp_memory { ++ struct agp_memory *next; ++ struct agp_memory *prev; ++ struct agp_bridge_data *bridge; ++ struct page **pages; ++ size_t page_count; ++ int key; ++ int num_scratch_pages; ++ off_t pg_start; ++ u32 type; ++ u32 physical; ++ bool is_bound; ++ bool is_flushed; ++ struct list_head mapped_list; ++ struct scatterlist *sg_list; ++ int num_sg; ++}; ++ ++struct drm_map { ++ long unsigned int offset; ++ long unsigned int size; ++ enum drm_map_type type; ++ enum drm_map_flags flags; ++ void *handle; ++ int mtrr; ++}; ++ ++struct drm_buf_desc { ++ int count; ++ int size; ++ int low_mark; ++ int high_mark; ++ enum { ++ 
_DRM_PAGE_ALIGN = 1, ++ _DRM_AGP_BUFFER = 2, ++ _DRM_SG_BUFFER = 4, ++ _DRM_FB_BUFFER = 8, ++ _DRM_PCI_BUFFER_RO = 16, ++ } flags; ++ long unsigned int agp_start; ++}; ++ ++struct drm_buf_info { ++ int count; ++ struct drm_buf_desc *list; ++}; ++ ++struct drm_buf_free { ++ int count; ++ int *list; ++}; ++ ++struct drm_buf_pub { ++ int idx; ++ int total; ++ int used; ++ void *address; ++}; ++ ++struct drm_buf_map { ++ int count; ++ void *virtual; ++ struct drm_buf_pub *list; ++}; ++ ++struct drm_hash_item { ++ struct hlist_node head; ++ long unsigned int key; ++}; ++ ++typedef struct drm_dma_handle drm_dma_handle_t; ++ ++struct drm_map_list { ++ struct list_head head; ++ struct drm_hash_item hash; ++ struct drm_local_map *map; ++ uint64_t user_token; ++ struct drm_master *master; ++}; ++ ++struct drm_agp_mem { ++ long unsigned int handle; ++ struct agp_memory *memory; ++ long unsigned int bound; ++ int pages; ++ struct list_head head; ++}; ++ ++typedef unsigned int drm_context_t; ++ ++struct drm_ctx_priv_map { ++ unsigned int ctx_id; ++ void *handle; ++}; ++ ++enum drm_ctx_flags { ++ _DRM_CONTEXT_PRESERVED = 1, ++ _DRM_CONTEXT_2DONLY = 2, ++}; ++ ++struct drm_ctx { ++ drm_context_t handle; ++ enum drm_ctx_flags flags; ++}; ++ ++struct drm_ctx_res { ++ int count; ++ struct drm_ctx *contexts; ++}; ++ ++struct drm_ctx_list { ++ struct list_head head; ++ drm_context_t handle; ++ struct drm_file *tag; ++}; ++ ++enum drm_minor_type { ++ DRM_MINOR_PRIMARY = 0, ++ DRM_MINOR_CONTROL = 1, ++ DRM_MINOR_RENDER = 2, ++}; ++ ++struct drm_gem_close { ++ __u32 handle; ++ __u32 pad; ++}; ++ ++struct drm_gem_flink { ++ __u32 handle; ++ __u32 name; ++}; ++ ++struct drm_gem_open { ++ __u32 name; ++ __u32 handle; ++ __u64 size; ++}; ++ ++struct drm_version { ++ int version_major; ++ int version_minor; ++ int version_patchlevel; ++ __kernel_size_t name_len; ++ char *name; ++ __kernel_size_t date_len; ++ char *date; ++ __kernel_size_t desc_len; ++ char *desc; ++}; ++ ++struct drm_unique { ++ __kernel_size_t unique_len; ++ char *unique; ++}; ++ ++struct drm_client { ++ int idx; ++ int auth; ++ long unsigned int pid; ++ long unsigned int uid; ++ long unsigned int magic; ++ long unsigned int iocs; ++}; ++ ++enum drm_stat_type { ++ _DRM_STAT_LOCK = 0, ++ _DRM_STAT_OPENS = 1, ++ _DRM_STAT_CLOSES = 2, ++ _DRM_STAT_IOCTLS = 3, ++ _DRM_STAT_LOCKS = 4, ++ _DRM_STAT_UNLOCKS = 5, ++ _DRM_STAT_VALUE = 6, ++ _DRM_STAT_BYTE = 7, ++ _DRM_STAT_COUNT = 8, ++ _DRM_STAT_IRQ = 9, ++ _DRM_STAT_PRIMARY = 10, ++ _DRM_STAT_SECONDARY = 11, ++ _DRM_STAT_DMA = 12, ++ _DRM_STAT_SPECIAL = 13, ++ _DRM_STAT_MISSED = 14, ++}; ++ ++struct drm_stats { ++ long unsigned int count; ++ struct { ++ long unsigned int value; ++ enum drm_stat_type type; ++ } data[15]; ++}; ++ ++struct drm_set_version { ++ int drm_di_major; ++ int drm_di_minor; ++ int drm_dd_major; ++ int drm_dd_minor; ++}; ++ ++struct drm_get_cap { ++ __u64 capability; ++ __u64 value; ++}; ++ ++struct drm_set_client_cap { ++ __u64 capability; ++ __u64 value; ++}; ++ ++struct drm_control { ++ enum { ++ DRM_ADD_COMMAND = 0, ++ DRM_RM_COMMAND = 1, ++ DRM_INST_HANDLER = 2, ++ DRM_UNINST_HANDLER = 3, ++ } func; ++ int irq; ++}; ++ ++enum drm_lock_flags { ++ _DRM_LOCK_READY = 1, ++ _DRM_LOCK_QUIESCENT = 2, ++ _DRM_LOCK_FLUSH = 4, ++ _DRM_LOCK_FLUSH_ALL = 8, ++ _DRM_HALT_ALL_QUEUES = 16, ++ _DRM_HALT_CUR_QUEUES = 32, ++}; ++ ++struct drm_lock { ++ int context; ++ enum drm_lock_flags flags; ++}; ++ ++struct drm_scatter_gather { ++ long unsigned int size; ++ long unsigned int handle; ++}; ++ 
++struct drm_irq_busid { ++ int irq; ++ int busnum; ++ int devnum; ++ int funcnum; ++}; ++ ++struct class_attribute_string { ++ struct class_attribute attr; ++ char *str; ++}; ++ ++enum drm_mm_insert_mode { ++ DRM_MM_INSERT_BEST = 0, ++ DRM_MM_INSERT_LOW = 1, ++ DRM_MM_INSERT_HIGH = 2, ++ DRM_MM_INSERT_EVICT = 3, ++ DRM_MM_INSERT_ONCE = 2147483648, ++ DRM_MM_INSERT_HIGHEST = 2147483650, ++ DRM_MM_INSERT_LOWEST = 2147483649, ++}; ++ ++struct drm_mm_scan { ++ struct drm_mm *mm; ++ u64 size; ++ u64 alignment; ++ u64 remainder_mask; ++ u64 range_start; ++ u64 range_end; ++ u64 hit_start; ++ u64 hit_end; ++ long unsigned int color; ++ enum drm_mm_insert_mode mode; ++}; ++ ++struct drm_mode_modeinfo { ++ __u32 clock; ++ __u16 hdisplay; ++ __u16 hsync_start; ++ __u16 hsync_end; ++ __u16 htotal; ++ __u16 hskew; ++ __u16 vdisplay; ++ __u16 vsync_start; ++ __u16 vsync_end; ++ __u16 vtotal; ++ __u16 vscan; ++ __u32 vrefresh; ++ __u32 flags; ++ __u32 type; ++ char name[32]; ++}; ++ ++struct drm_mode_crtc { ++ __u64 set_connectors_ptr; ++ __u32 count_connectors; ++ __u32 crtc_id; ++ __u32 fb_id; ++ __u32 x; ++ __u32 y; ++ __u32 gamma_size; ++ __u32 mode_valid; ++ struct drm_mode_modeinfo mode; ++}; ++ ++struct drm_format_name_buf { ++ char str[32]; ++}; ++ ++struct cea_sad { ++ u8 format; ++ u8 channels; ++ u8 freq; ++ u8 byte2; ++}; ++ ++struct displayid_hdr { ++ u8 rev; ++ u8 bytes; ++ u8 prod_id; ++ u8 ext_count; ++}; ++ ++struct displayid_block { ++ u8 tag; ++ u8 rev; ++ u8 num_bytes; ++}; ++ ++struct displayid_tiled_block { ++ struct displayid_block base; ++ u8 tile_cap; ++ u8 topo[3]; ++ u8 tile_size[4]; ++ u8 tile_pixel_bezel[5]; ++ u8 topology_id[8]; ++}; ++ ++struct displayid_detailed_timings_1 { ++ u8 pixel_clock[3]; ++ u8 flags; ++ u8 hactive[2]; ++ u8 hblank[2]; ++ u8 hsync[2]; ++ u8 hsw[2]; ++ u8 vactive[2]; ++ u8 vblank[2]; ++ u8 vsync[2]; ++ u8 vsw[2]; ++}; ++ ++struct displayid_detailed_timing_block { ++ struct displayid_block base; ++ struct displayid_detailed_timings_1 timings[0]; ++}; ++ ++struct detailed_mode_closure { ++ struct drm_connector *connector; ++ struct edid *edid; ++ bool preferred; ++ u32 quirks; ++ int modes; ++}; ++ ++struct edid_quirk { ++ char vendor[4]; ++ int product_id; ++ u32 quirks; ++}; ++ ++struct minimode { ++ short int w; ++ short int h; ++ short int r; ++ short int rb; ++}; ++ ++typedef void detailed_cb(struct detailed_timing *, void *); ++ ++struct stereo_mandatory_mode { ++ int width; ++ int height; ++ int vrefresh; ++ unsigned int flags; ++}; ++ ++struct drm_info_list { ++ const char *name; ++ int (*show)(struct seq_file *, void *); ++ u32 driver_features; ++ void *data; ++}; ++ ++struct drm_info_node { ++ struct drm_minor *minor; ++ const struct drm_info_list *info_ent; ++ struct list_head list; ++ struct dentry *dent; ++}; ++ ++struct i2c_device_id { ++ char name[20]; ++ kernel_ulong_t driver_data; ++}; ++ ++enum i2c_alert_protocol { ++ I2C_PROTOCOL_SMBUS_ALERT = 0, ++ I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, ++}; ++ ++struct i2c_board_info; ++ ++struct i2c_driver { ++ unsigned int class; ++ int (*probe)(struct i2c_client *, const struct i2c_device_id *); ++ int (*remove)(struct i2c_client *); ++ int (*probe_new)(struct i2c_client *); ++ void (*shutdown)(struct i2c_client *); ++ void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); ++ int (*command)(struct i2c_client *, unsigned int, void *); ++ struct device_driver driver; ++ const struct i2c_device_id *id_table; ++ int (*detect)(struct i2c_client *, struct i2c_board_info *); ++ const 
short unsigned int *address_list; ++ struct list_head clients; ++ bool disable_i2c_core_irq_mapping; ++}; ++ ++struct i2c_board_info { ++ char type[20]; ++ short unsigned int flags; ++ short unsigned int addr; ++ const char *dev_name; ++ void *platform_data; ++ struct device_node *of_node; ++ struct fwnode_handle *fwnode; ++ const struct property_entry *properties; ++ const struct resource *resources; ++ unsigned int num_resources; ++ int irq; ++}; ++ ++struct drm_encoder_slave_funcs { ++ void (*set_config)(struct drm_encoder *, void *); ++ void (*destroy)(struct drm_encoder *); ++ void (*dpms)(struct drm_encoder *, int); ++ void (*save)(struct drm_encoder *); ++ void (*restore)(struct drm_encoder *); ++ bool (*mode_fixup)(struct drm_encoder *, const struct drm_display_mode *, struct drm_display_mode *); ++ int (*mode_valid)(struct drm_encoder *, struct drm_display_mode *); ++ void (*mode_set)(struct drm_encoder *, struct drm_display_mode *, struct drm_display_mode *); ++ enum drm_connector_status (*detect)(struct drm_encoder *, struct drm_connector *); ++ int (*get_modes)(struct drm_encoder *, struct drm_connector *); ++ int (*create_resources)(struct drm_encoder *, struct drm_connector *); ++ int (*set_property)(struct drm_encoder *, struct drm_connector *, struct drm_property *, uint64_t); ++}; ++ ++struct drm_encoder_slave { ++ struct drm_encoder base; ++ const struct drm_encoder_slave_funcs *slave_funcs; ++ void *slave_priv; ++ void *bus_priv; ++}; ++ ++struct drm_i2c_encoder_driver { ++ struct i2c_driver i2c_driver; ++ int (*encoder_init)(struct i2c_client *, struct drm_device *, struct drm_encoder_slave *); ++}; ++ ++struct trace_event_raw_drm_vblank_event { ++ struct trace_entry ent; ++ int crtc; ++ unsigned int seq; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_drm_vblank_event_queued { ++ struct trace_entry ent; ++ struct drm_file *file; ++ int crtc; ++ unsigned int seq; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_drm_vblank_event_delivered { ++ struct trace_entry ent; ++ struct drm_file *file; ++ int crtc; ++ unsigned int seq; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_drm_vblank_event {}; ++ ++struct trace_event_data_offsets_drm_vblank_event_queued {}; ++ ++struct trace_event_data_offsets_drm_vblank_event_delivered {}; ++ ++enum drm_global_types { ++ DRM_GLOBAL_TTM_MEM = 0, ++ DRM_GLOBAL_TTM_BO = 1, ++ DRM_GLOBAL_TTM_OBJECT = 2, ++ DRM_GLOBAL_NUM = 3, ++}; ++ ++struct drm_global_reference { ++ enum drm_global_types global_type; ++ size_t size; ++ void *object; ++ int (*init)(struct drm_global_reference *); ++ void (*release)(struct drm_global_reference *); ++}; ++ ++struct drm_global_item { ++ struct mutex mutex; ++ void *object; ++ int refcount; ++}; ++ ++struct dma_buf_export_info { ++ const char *exp_name; ++ struct module *owner; ++ const struct dma_buf_ops *ops; ++ size_t size; ++ int flags; ++ struct reservation_object *resv; ++ void *priv; ++}; ++ ++struct drm_prime_handle { ++ __u32 handle; ++ __u32 flags; ++ __s32 fd; ++}; ++ ++struct drm_prime_member { ++ struct dma_buf *dma_buf; ++ uint32_t handle; ++ struct rb_node dmabuf_rb; ++ struct rb_node handle_rb; ++}; ++ ++struct drm_prime_attachment { ++ struct sg_table *sgt; ++ enum dma_data_direction dir; ++}; ++ ++struct drm_vma_offset_file { ++ struct rb_node vm_rb; ++ struct drm_file *vm_tag; ++ long unsigned int vm_count; ++}; ++ ++struct drm_flip_work; ++ ++typedef void (*drm_flip_func_t)(struct drm_flip_work *, void *); ++ ++struct drm_flip_work { ++ const char *name; ++ 
drm_flip_func_t func; ++ struct work_struct worker; ++ struct list_head queued; ++ struct list_head commited; ++ spinlock_t lock; ++}; ++ ++struct drm_flip_task { ++ struct list_head node; ++ void *data; ++}; ++ ++struct drm_mode_atomic { ++ __u32 flags; ++ __u32 count_objs; ++ __u64 objs_ptr; ++ __u64 count_props_ptr; ++ __u64 props_ptr; ++ __u64 prop_values_ptr; ++ __u64 reserved; ++ __u64 user_data; ++}; ++ ++struct drm_writeback_connector { ++ struct drm_connector base; ++ struct drm_encoder encoder; ++ struct drm_property_blob *pixel_formats_blob_ptr; ++ spinlock_t job_lock; ++ struct list_head job_queue; ++ unsigned int fence_context; ++ spinlock_t fence_lock; ++ long unsigned int fence_seqno; ++ char timeline_name[32]; ++}; ++ ++struct sync_file { ++ struct file *file; ++ char user_name[32]; ++ struct list_head sync_file_list; ++ wait_queue_head_t wq; ++ long unsigned int flags; ++ struct dma_fence *fence; ++ struct dma_fence_cb cb; ++}; ++ ++struct drm_out_fence_state { ++ s32 *out_fence_ptr; ++ struct sync_file *sync_file; ++ int fd; ++}; ++ ++struct drm_mode_fb_cmd { ++ __u32 fb_id; ++ __u32 width; ++ __u32 height; ++ __u32 pitch; ++ __u32 bpp; ++ __u32 depth; ++ __u32 handle; ++}; ++ ++struct drm_mode_fb_dirty_cmd { ++ __u32 fb_id; ++ __u32 flags; ++ __u32 color; ++ __u32 num_clips; ++ __u64 clips_ptr; ++}; ++ ++struct drm_mode_rmfb_work { ++ struct work_struct work; ++ struct list_head fbs; ++}; ++ ++struct drm_mode_get_connector { ++ __u64 encoders_ptr; ++ __u64 modes_ptr; ++ __u64 props_ptr; ++ __u64 prop_values_ptr; ++ __u32 count_modes; ++ __u32 count_props; ++ __u32 count_encoders; ++ __u32 encoder_id; ++ __u32 connector_id; ++ __u32 connector_type; ++ __u32 connector_type_id; ++ __u32 connection; ++ __u32 mm_width; ++ __u32 mm_height; ++ __u32 subpixel; ++ __u32 pad; ++}; ++ ++struct drm_mode_connector_set_property { ++ __u64 value; ++ __u32 prop_id; ++ __u32 connector_id; ++}; ++ ++struct drm_mode_obj_set_property { ++ __u64 value; ++ __u32 prop_id; ++ __u32 obj_id; ++ __u32 obj_type; ++}; ++ ++struct drm_prop_enum_list { ++ int type; ++ const char *name; ++}; ++ ++struct drm_conn_prop_enum_list { ++ int type; ++ const char *name; ++ struct ida ida; ++}; ++ ++struct drm_mode_get_encoder { ++ __u32 encoder_id; ++ __u32 encoder_type; ++ __u32 crtc_id; ++ __u32 possible_crtcs; ++ __u32 possible_clones; ++}; ++ ++struct drm_mode_obj_get_properties { ++ __u64 props_ptr; ++ __u64 prop_values_ptr; ++ __u32 count_props; ++ __u32 obj_id; ++ __u32 obj_type; ++}; ++ ++struct drm_mode_property_enum { ++ __u64 value; ++ char name[32]; ++}; ++ ++struct drm_mode_get_property { ++ __u64 values_ptr; ++ __u64 enum_blob_ptr; ++ __u32 prop_id; ++ __u32 flags; ++ char name[32]; ++ __u32 count_values; ++ __u32 count_enum_blobs; ++}; ++ ++struct drm_mode_get_blob { ++ __u32 blob_id; ++ __u32 length; ++ __u64 data; ++}; ++ ++struct drm_mode_create_blob { ++ __u64 data; ++ __u32 length; ++ __u32 blob_id; ++}; ++ ++struct drm_mode_destroy_blob { ++ __u32 blob_id; ++}; ++ ++struct drm_property_enum { ++ uint64_t value; ++ struct list_head head; ++ char name[32]; ++}; ++ ++struct drm_mode_set_plane { ++ __u32 plane_id; ++ __u32 crtc_id; ++ __u32 fb_id; ++ __u32 flags; ++ __s32 crtc_x; ++ __s32 crtc_y; ++ __u32 crtc_w; ++ __u32 crtc_h; ++ __u32 src_x; ++ __u32 src_y; ++ __u32 src_h; ++ __u32 src_w; ++}; ++ ++struct drm_mode_get_plane { ++ __u32 plane_id; ++ __u32 crtc_id; ++ __u32 fb_id; ++ __u32 possible_crtcs; ++ __u32 gamma_size; ++ __u32 count_format_types; ++ __u64 format_type_ptr; ++}; ++ 
++struct drm_mode_get_plane_res { ++ __u64 plane_id_ptr; ++ __u32 count_planes; ++}; ++ ++struct drm_mode_cursor { ++ __u32 flags; ++ __u32 crtc_id; ++ __s32 x; ++ __s32 y; ++ __u32 width; ++ __u32 height; ++ __u32 handle; ++}; ++ ++struct drm_mode_cursor2 { ++ __u32 flags; ++ __u32 crtc_id; ++ __s32 x; ++ __s32 y; ++ __u32 width; ++ __u32 height; ++ __u32 handle; ++ __s32 hot_x; ++ __s32 hot_y; ++}; ++ ++struct drm_mode_crtc_page_flip_target { ++ __u32 crtc_id; ++ __u32 fb_id; ++ __u32 flags; ++ __u32 sequence; ++ __u64 user_data; ++}; ++ ++struct drm_format_modifier_blob { ++ __u32 version; ++ __u32 flags; ++ __u32 count_formats; ++ __u32 formats_offset; ++ __u32 count_modifiers; ++ __u32 modifiers_offset; ++}; ++ ++struct drm_format_modifier { ++ __u64 formats; ++ __u32 offset; ++ __u32 pad; ++ __u64 modifier; ++}; ++ ++struct drm_mode_crtc_lut { ++ __u32 crtc_id; ++ __u32 gamma_size; ++ __u64 red; ++ __u64 green; ++ __u64 blue; ++}; ++ ++struct drm_print_iterator { ++ void *data; ++ ssize_t start; ++ ssize_t remain; ++ ssize_t offset; ++}; ++ ++struct drm_mode_map_dumb { ++ __u32 handle; ++ __u32 pad; ++ __u64 offset; ++}; ++ ++struct drm_mode_destroy_dumb { ++ __u32 handle; ++}; ++ ++struct drm_mode_card_res { ++ __u64 fb_id_ptr; ++ __u64 crtc_id_ptr; ++ __u64 connector_id_ptr; ++ __u64 encoder_id_ptr; ++ __u32 count_fbs; ++ __u32 count_crtcs; ++ __u32 count_connectors; ++ __u32 count_encoders; ++ __u32 min_width; ++ __u32 max_width; ++ __u32 min_height; ++ __u32 max_height; ++}; ++ ++enum drm_vblank_seq_type { ++ _DRM_VBLANK_ABSOLUTE = 0, ++ _DRM_VBLANK_RELATIVE = 1, ++ _DRM_VBLANK_HIGH_CRTC_MASK = 62, ++ _DRM_VBLANK_EVENT = 67108864, ++ _DRM_VBLANK_FLIP = 134217728, ++ _DRM_VBLANK_NEXTONMISS = 268435456, ++ _DRM_VBLANK_SECONDARY = 536870912, ++ _DRM_VBLANK_SIGNAL = 1073741824, ++}; ++ ++struct drm_wait_vblank_request { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ long unsigned int signal; ++}; ++ ++struct drm_wait_vblank_reply { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ long int tval_sec; ++ long int tval_usec; ++}; ++ ++union drm_wait_vblank { ++ struct drm_wait_vblank_request request; ++ struct drm_wait_vblank_reply reply; ++}; ++ ++struct drm_modeset_ctl { ++ __u32 crtc; ++ __u32 cmd; ++}; ++ ++struct drm_crtc_get_sequence { ++ __u32 crtc_id; ++ __u32 active; ++ __u64 sequence; ++ __s64 sequence_ns; ++}; ++ ++struct drm_crtc_queue_sequence { ++ __u32 crtc_id; ++ __u32 flags; ++ __u64 sequence; ++ __u64 user_data; ++}; ++ ++enum dma_fence_flag_bits { ++ DMA_FENCE_FLAG_SIGNALED_BIT = 0, ++ DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, ++ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, ++ DMA_FENCE_FLAG_USER_BITS = 3, ++}; ++ ++struct drm_syncobj_create { ++ __u32 handle; ++ __u32 flags; ++}; ++ ++struct drm_syncobj_destroy { ++ __u32 handle; ++ __u32 pad; ++}; ++ ++struct drm_syncobj_handle { ++ __u32 handle; ++ __u32 flags; ++ __s32 fd; ++ __u32 pad; ++}; ++ ++struct drm_syncobj_wait { ++ __u64 handles; ++ __s64 timeout_nsec; ++ __u32 count_handles; ++ __u32 flags; ++ __u32 first_signaled; ++ __u32 pad; ++}; ++ ++struct drm_syncobj_array { ++ __u64 handles; ++ __u32 count_handles; ++ __u32 pad; ++}; ++ ++struct drm_syncobj { ++ struct kref refcount; ++ struct dma_fence *fence; ++ struct list_head cb_list; ++ spinlock_t lock; ++ struct file *file; ++}; ++ ++struct drm_syncobj_cb; ++ ++typedef void (*drm_syncobj_func_t)(struct drm_syncobj *, struct drm_syncobj_cb *); ++ ++struct drm_syncobj_cb { ++ struct list_head node; ++ drm_syncobj_func_t func; ++}; ++ ++struct 
drm_syncobj_null_fence { ++ struct dma_fence base; ++ spinlock_t lock; ++}; ++ ++struct syncobj_wait_entry { ++ struct task_struct *task; ++ struct dma_fence *fence; ++ struct dma_fence_cb fence_cb; ++ struct drm_syncobj_cb syncobj_cb; ++}; ++ ++struct drm_mode_create_lease { ++ __u64 object_ids; ++ __u32 object_count; ++ __u32 flags; ++ __u32 lessee_id; ++ __u32 fd; ++}; ++ ++struct drm_mode_list_lessees { ++ __u32 count_lessees; ++ __u32 pad; ++ __u64 lessees_ptr; ++}; ++ ++struct drm_mode_get_lease { ++ __u32 count_objects; ++ __u32 pad; ++ __u64 objects_ptr; ++}; ++ ++struct drm_mode_revoke_lease { ++ __u32 lessee_id; ++}; ++ ++struct drm_vma_entry { ++ struct list_head head; ++ struct vm_area_struct *vma; ++ pid_t pid; ++}; ++ ++enum drm_dma_flags { ++ _DRM_DMA_BLOCK = 1, ++ _DRM_DMA_WHILE_LOCKED = 2, ++ _DRM_DMA_PRIORITY = 4, ++ _DRM_DMA_WAIT = 16, ++ _DRM_DMA_SMALLER_OK = 32, ++ _DRM_DMA_LARGER_OK = 64, ++}; ++ ++struct drm_dma { ++ int context; ++ int send_count; ++ int *send_indices; ++ int *send_sizes; ++ enum drm_dma_flags flags; ++ int request_count; ++ int request_size; ++ int *request_indices; ++ int *request_sizes; ++ int granted_count; ++}; ++ ++typedef int drm_ioctl_compat_t(struct file *, unsigned int, long unsigned int); ++ ++struct drm_version_32 { ++ int version_major; ++ int version_minor; ++ int version_patchlevel; ++ u32 name_len; ++ u32 name; ++ u32 date_len; ++ u32 date; ++ u32 desc_len; ++ u32 desc; ++}; ++ ++typedef struct drm_version_32 drm_version32_t; ++ ++struct drm_unique32 { ++ u32 unique_len; ++ u32 unique; ++}; ++ ++typedef struct drm_unique32 drm_unique32_t; ++ ++struct drm_map32 { ++ u32 offset; ++ u32 size; ++ enum drm_map_type type; ++ enum drm_map_flags flags; ++ u32 handle; ++ int mtrr; ++}; ++ ++typedef struct drm_map32 drm_map32_t; ++ ++struct drm_client32 { ++ int idx; ++ int auth; ++ u32 pid; ++ u32 uid; ++ u32 magic; ++ u32 iocs; ++}; ++ ++typedef struct drm_client32 drm_client32_t; ++ ++struct drm_stats32 { ++ u32 count; ++ struct { ++ u32 value; ++ enum drm_stat_type type; ++ } data[15]; ++}; ++ ++typedef struct drm_stats32 drm_stats32_t; ++ ++struct drm_buf_desc32 { ++ int count; ++ int size; ++ int low_mark; ++ int high_mark; ++ int flags; ++ u32 agp_start; ++}; ++ ++typedef struct drm_buf_desc32 drm_buf_desc32_t; ++ ++struct drm_buf_info32 { ++ int count; ++ u32 list; ++}; ++ ++typedef struct drm_buf_info32 drm_buf_info32_t; ++ ++struct drm_buf_pub32 { ++ int idx; ++ int total; ++ int used; ++ u32 address; ++}; ++ ++typedef struct drm_buf_pub32 drm_buf_pub32_t; ++ ++struct drm_buf_map32 { ++ int count; ++ u32 virtual; ++ u32 list; ++}; ++ ++typedef struct drm_buf_map32 drm_buf_map32_t; ++ ++struct drm_buf_free32 { ++ int count; ++ u32 list; ++}; ++ ++typedef struct drm_buf_free32 drm_buf_free32_t; ++ ++struct drm_ctx_priv_map32 { ++ unsigned int ctx_id; ++ u32 handle; ++}; ++ ++typedef struct drm_ctx_priv_map32 drm_ctx_priv_map32_t; ++ ++struct drm_ctx_res32 { ++ int count; ++ u32 contexts; ++}; ++ ++typedef struct drm_ctx_res32 drm_ctx_res32_t; ++ ++struct drm_dma32 { ++ int context; ++ int send_count; ++ u32 send_indices; ++ u32 send_sizes; ++ enum drm_dma_flags flags; ++ int request_count; ++ int request_size; ++ u32 request_indices; ++ u32 request_sizes; ++ int granted_count; ++}; ++ ++typedef struct drm_dma32 drm_dma32_t; ++ ++struct drm_scatter_gather32 { ++ u32 size; ++ u32 handle; ++}; ++ ++typedef struct drm_scatter_gather32 drm_scatter_gather32_t; ++ ++struct drm_wait_vblank_request32 { ++ enum drm_vblank_seq_type type; ++ 
unsigned int sequence; ++ u32 signal; ++}; ++ ++struct drm_wait_vblank_reply32 { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ s32 tval_sec; ++ s32 tval_usec; ++}; ++ ++union drm_wait_vblank32 { ++ struct drm_wait_vblank_request32 request; ++ struct drm_wait_vblank_reply32 reply; ++}; ++ ++typedef union drm_wait_vblank32 drm_wait_vblank32_t; ++ ++struct drm_ati_pcigart_info { ++ int gart_table_location; ++ int gart_reg_if; ++ void *addr; ++ dma_addr_t bus_addr; ++ dma_addr_t table_mask; ++ struct drm_dma_handle *table_handle; ++ struct drm_local_map mapping; ++ int table_size; ++}; ++ ++struct component_master_ops { ++ int (*bind)(struct device *); ++ void (*unbind)(struct device *); ++}; ++ ++struct of_endpoint { ++ unsigned int port; ++ unsigned int id; ++ const struct device_node *local_node; ++}; ++ ++struct component_match; ++ ++struct firmware { ++ size_t size; ++ const u8 *data; ++ struct page **pages; ++ void *priv; ++}; ++ ++struct drm_dmi_panel_orientation_data { ++ int width; ++ int height; ++ const char * const *bios_dates; ++ int orientation; ++}; ++ ++struct ttm_bus_placement { ++ void *addr; ++ phys_addr_t base; ++ long unsigned int size; ++ long unsigned int offset; ++ bool is_iomem; ++ bool io_reserved_vm; ++ uint64_t io_reserved_count; ++}; ++ ++struct ttm_mem_reg { ++ void *mm_node; ++ long unsigned int start; ++ long unsigned int size; ++ long unsigned int num_pages; ++ uint32_t page_alignment; ++ uint32_t mem_type; ++ uint32_t placement; ++ struct ttm_bus_placement bus; ++}; ++ ++enum ttm_bo_type { ++ ttm_bo_type_device = 0, ++ ttm_bo_type_kernel = 1, ++ ttm_bo_type_sg = 2, ++}; ++ ++struct ttm_bo_device; ++ ++struct ttm_tt; ++ ++struct ttm_buffer_object { ++ struct ttm_bo_device *bdev; ++ enum ttm_bo_type type; ++ void (*destroy)(struct ttm_buffer_object *); ++ long unsigned int num_pages; ++ size_t acc_size; ++ struct kref kref; ++ struct kref list_kref; ++ struct ttm_mem_reg mem; ++ struct file *persistent_swap_storage; ++ struct ttm_tt *ttm; ++ bool evicted; ++ atomic_t cpu_writers; ++ struct list_head lru; ++ struct list_head ddestroy; ++ struct list_head swap; ++ struct list_head io_reserve_lru; ++ struct dma_fence *moving; ++ struct drm_vma_offset_node vma_node; ++ unsigned int priority; ++ uint64_t offset; ++ struct sg_table *sg; ++ struct reservation_object *resv; ++ struct reservation_object ttm_resv; ++ struct mutex wu_mutex; ++}; ++ ++struct ttm_mem_type_manager_func; ++ ++struct ttm_mem_type_manager { ++ struct ttm_bo_device *bdev; ++ bool has_type; ++ bool use_type; ++ uint32_t flags; ++ uint64_t gpu_offset; ++ uint64_t size; ++ uint32_t available_caching; ++ uint32_t default_caching; ++ const struct ttm_mem_type_manager_func *func; ++ void *priv; ++ struct mutex io_reserve_mutex; ++ bool use_io_reserve_lru; ++ bool io_reserve_fastpath; ++ spinlock_t move_lock; ++ struct list_head io_reserve_lru; ++ struct list_head lru[4]; ++ struct dma_fence *move; ++}; ++ ++struct ttm_bo_global; ++ ++struct ttm_bo_driver; ++ ++struct ttm_bo_device { ++ struct list_head device_list; ++ struct ttm_bo_global *glob; ++ struct ttm_bo_driver *driver; ++ struct ttm_mem_type_manager man[8]; ++ struct drm_vma_offset_manager vma_manager; ++ struct list_head ddestroy; ++ struct address_space *dev_mapping; ++ struct delayed_work wq; ++ bool need_dma32; ++ bool no_retry; ++}; ++ ++enum ttm_caching_state { ++ tt_uncached = 0, ++ tt_wc = 1, ++ tt_cached = 2, ++}; ++ ++struct ttm_backend_func; ++ ++struct ttm_tt { ++ struct ttm_bo_device *bdev; ++ struct 
ttm_backend_func *func; ++ struct page **pages; ++ uint32_t page_flags; ++ long unsigned int num_pages; ++ struct sg_table *sg; ++ struct file *swap_storage; ++ enum ttm_caching_state caching_state; ++ enum { ++ tt_bound = 0, ++ tt_unbound = 1, ++ tt_unpopulated = 2, ++ } state; ++}; ++ ++struct ttm_operation_ctx { ++ bool interruptible; ++ bool no_wait_gpu; ++ struct reservation_object *resv; ++ uint64_t bytes_moved; ++ uint32_t flags; ++}; ++ ++struct ttm_mem_zone; ++ ++struct ttm_mem_global { ++ struct kobject kobj; ++ struct ttm_bo_global *bo_glob; ++ struct workqueue_struct *swap_queue; ++ struct work_struct work; ++ spinlock_t lock; ++ uint64_t lower_mem_limit; ++ struct ttm_mem_zone *zones[2]; ++ unsigned int num_zones; ++ struct ttm_mem_zone *zone_kernel; ++ struct ttm_mem_zone *zone_dma32; ++}; ++ ++struct ttm_bo_global { ++ struct kobject kobj; ++ struct ttm_mem_global *mem_glob; ++ struct page *dummy_read_page; ++ struct mutex device_list_mutex; ++ spinlock_t lru_lock; ++ struct list_head device_list; ++ struct list_head swap_lru[4]; ++ atomic_t bo_count; ++}; ++ ++struct ttm_mem_zone { ++ struct kobject kobj; ++ struct ttm_mem_global *glob; ++ const char *name; ++ uint64_t zone_mem; ++ uint64_t emer_mem; ++ uint64_t max_mem; ++ uint64_t swap_limit; ++ uint64_t used_mem; ++}; ++ ++struct ttm_place { ++ unsigned int fpfn; ++ unsigned int lpfn; ++ uint32_t flags; ++}; ++ ++struct ttm_placement { ++ unsigned int num_placement; ++ const struct ttm_place *placement; ++ unsigned int num_busy_placement; ++ const struct ttm_place *busy_placement; ++}; ++ ++struct ttm_backend_func { ++ int (*bind)(struct ttm_tt *, struct ttm_mem_reg *); ++ int (*unbind)(struct ttm_tt *); ++ void (*destroy)(struct ttm_tt *); ++}; ++ ++struct ttm_mem_type_manager_func { ++ int (*init)(struct ttm_mem_type_manager *, long unsigned int); ++ int (*takedown)(struct ttm_mem_type_manager *); ++ int (*get_node)(struct ttm_mem_type_manager *, struct ttm_buffer_object *, const struct ttm_place *, struct ttm_mem_reg *); ++ void (*put_node)(struct ttm_mem_type_manager *, struct ttm_mem_reg *); ++ void (*debug)(struct ttm_mem_type_manager *, struct drm_printer *); ++}; ++ ++struct ttm_bo_driver { ++ struct ttm_tt * (*ttm_tt_create)(struct ttm_buffer_object *, uint32_t); ++ int (*ttm_tt_populate)(struct ttm_tt *, struct ttm_operation_ctx *); ++ void (*ttm_tt_unpopulate)(struct ttm_tt *); ++ int (*invalidate_caches)(struct ttm_bo_device *, uint32_t); ++ int (*init_mem_type)(struct ttm_bo_device *, uint32_t, struct ttm_mem_type_manager *); ++ bool (*eviction_valuable)(struct ttm_buffer_object *, const struct ttm_place *); ++ void (*evict_flags)(struct ttm_buffer_object *, struct ttm_placement *); ++ int (*move)(struct ttm_buffer_object *, bool, struct ttm_operation_ctx *, struct ttm_mem_reg *); ++ int (*verify_access)(struct ttm_buffer_object *, struct file *); ++ void (*move_notify)(struct ttm_buffer_object *, bool, struct ttm_mem_reg *); ++ int (*fault_reserve_notify)(struct ttm_buffer_object *); ++ void (*swap_notify)(struct ttm_buffer_object *); ++ int (*io_mem_reserve)(struct ttm_bo_device *, struct ttm_mem_reg *); ++ void (*io_mem_free)(struct ttm_bo_device *, struct ttm_mem_reg *); ++ long unsigned int (*io_mem_pfn)(struct ttm_buffer_object *, long unsigned int); ++ int (*access_memory)(struct ttm_buffer_object *, long unsigned int, void *, int, int); ++}; ++ ++struct ttm_dma_tt { ++ struct ttm_tt ttm; ++ dma_addr_t *dma_address; ++ struct list_head pages_list; ++}; ++ ++struct ttm_bo_global_ref { ++ struct 
drm_global_reference ref; ++ struct ttm_mem_global *mem_glob; ++}; ++ ++struct ttm_bo_kmap_obj { ++ void *virtual; ++ struct page *page; ++ enum { ++ ttm_bo_map_iomap = 129, ++ ttm_bo_map_vmap = 2, ++ ttm_bo_map_kmap = 3, ++ ttm_bo_map_premapped = 132, ++ } bo_kmap_type; ++ struct ttm_buffer_object *bo; ++}; ++ ++struct ttm_transfer_obj { ++ struct ttm_buffer_object base; ++ struct ttm_buffer_object *bo; ++}; ++ ++enum ttm_ref_type { ++ TTM_REF_USAGE = 0, ++ TTM_REF_SYNCCPU_READ = 1, ++ TTM_REF_SYNCCPU_WRITE = 2, ++ TTM_REF_NUM = 3, ++}; ++ ++enum ttm_object_type { ++ ttm_fence_type = 0, ++ ttm_buffer_type = 1, ++ ttm_lock_type = 2, ++ ttm_prime_type = 3, ++ ttm_driver_type0 = 256, ++ ttm_driver_type1 = 257, ++ ttm_driver_type2 = 258, ++ ttm_driver_type3 = 259, ++ ttm_driver_type4 = 260, ++ ttm_driver_type5 = 261, ++}; ++ ++struct ttm_object_file; ++ ++struct ttm_base_object { ++ struct callback_head rhead; ++ struct drm_hash_item hash; ++ enum ttm_object_type object_type; ++ bool shareable; ++ struct ttm_object_file *tfile; ++ struct kref refcount; ++ void (*refcount_release)(struct ttm_base_object **); ++ void (*ref_obj_release)(struct ttm_base_object *, enum ttm_ref_type); ++}; ++ ++struct ttm_object_device; ++ ++struct ttm_object_file { ++ struct ttm_object_device *tdev; ++ spinlock_t lock; ++ struct list_head ref_list; ++ struct drm_open_hash ref_hash[3]; ++ struct kref refcount; ++}; ++ ++struct ttm_prime_object { ++ struct ttm_base_object base; ++ struct mutex mutex; ++ size_t size; ++ enum ttm_object_type real_type; ++ struct dma_buf *dma_buf; ++ void (*refcount_release)(struct ttm_base_object **); ++}; ++ ++struct ttm_object_device { ++ spinlock_t object_lock; ++ struct drm_open_hash object_hash; ++ atomic_t object_count; ++ struct ttm_mem_global *mem_glob; ++ struct dma_buf_ops ops; ++ void (*dmabuf_release)(struct dma_buf *); ++ size_t dma_buf_size; ++}; ++ ++struct ttm_ref_object { ++ struct callback_head callback_head; ++ struct drm_hash_item hash; ++ struct list_head head; ++ struct kref kref; ++ enum ttm_ref_type ref_type; ++ struct ttm_base_object *obj; ++ struct ttm_object_file *tfile; ++}; ++ ++struct ttm_lock { ++ struct ttm_base_object base; ++ wait_queue_head_t queue; ++ spinlock_t lock; ++ int32_t rw; ++ uint32_t flags; ++ bool kill_takers; ++ int signal; ++ struct ttm_object_file *vt_holder; ++}; ++ ++struct ttm_validate_buffer { ++ struct list_head head; ++ struct ttm_buffer_object *bo; ++ bool shared; ++}; ++ ++struct ttm_page_pool { ++ spinlock_t lock; ++ bool fill_lock; ++ struct list_head list; ++ gfp_t gfp_flags; ++ unsigned int npages; ++ char *name; ++ long unsigned int nfrees; ++ long unsigned int nrefills; ++ unsigned int order; ++}; ++ ++struct ttm_pool_opts { ++ unsigned int alloc_size; ++ unsigned int max_size; ++ unsigned int small; ++}; ++ ++struct ttm_pool_manager { ++ struct kobject kobj; ++ struct shrinker mm_shrink; ++ struct ttm_pool_opts options; ++ union { ++ struct ttm_page_pool pools[6]; ++ struct { ++ struct ttm_page_pool wc_pool; ++ struct ttm_page_pool uc_pool; ++ struct ttm_page_pool wc_pool_dma32; ++ struct ttm_page_pool uc_pool_dma32; ++ struct ttm_page_pool wc_pool_huge; ++ struct ttm_page_pool uc_pool_huge; ++ }; ++ }; ++}; ++ ++struct ttm_range_manager { ++ struct drm_mm mm; ++ spinlock_t lock; ++}; ++ ++enum pool_type { ++ IS_UNDEFINED = 0, ++ IS_WC = 2, ++ IS_UC = 4, ++ IS_CACHED = 8, ++ IS_DMA32 = 16, ++ IS_HUGE = 32, ++}; ++ ++struct dma_pool___2 { ++ struct list_head pools; ++ enum pool_type type; ++ spinlock_t lock; ++ struct 
list_head free_list; ++ struct device *dev; ++ unsigned int size; ++ unsigned int npages_free; ++ unsigned int npages_in_use; ++ long unsigned int nfrees; ++ long unsigned int nrefills; ++ gfp_t gfp_flags; ++ char name[13]; ++ char dev_name[64]; ++}; ++ ++struct dma_page___2 { ++ struct list_head page_list; ++ long unsigned int vaddr; ++ struct page *p; ++ dma_addr_t dma; ++}; ++ ++struct device_pools { ++ struct list_head pools; ++ struct device *dev; ++ struct dma_pool___2 *pool; ++}; ++ ++struct ttm_pool_manager___2 { ++ struct mutex lock; ++ struct list_head pools; ++ struct ttm_pool_opts options; ++ unsigned int npools; ++ struct shrinker mm_shrink; ++ struct kobject kobj; ++}; ++ ++struct hibmc_framebuffer { ++ struct drm_framebuffer fb; ++ struct drm_gem_object *obj; ++}; ++ ++struct hibmc_fbdev { ++ struct drm_fb_helper helper; ++ struct hibmc_framebuffer *fb; ++ int size; ++}; ++ ++struct hibmc_drm_private { ++ void *mmio; ++ void *fb_map; ++ long unsigned int fb_base; ++ long unsigned int fb_size; ++ bool msi_enabled; ++ struct drm_device *dev; ++ bool mode_config_initialized; ++ struct drm_atomic_state *suspend_state; ++ struct drm_global_reference mem_global_ref; ++ struct ttm_bo_global_ref bo_global_ref; ++ struct ttm_bo_device bdev; ++ bool initialized; ++ struct hibmc_fbdev *fbdev; ++ bool mm_inited; ++}; ++ ++struct hibmc_bo { ++ struct ttm_buffer_object bo; ++ struct ttm_placement placement; ++ struct ttm_bo_kmap_obj kmap; ++ struct drm_gem_object gem; ++ struct ttm_place placements[3]; ++ int pin_count; ++}; ++ ++struct hibmc_display_panel_pll { ++ long unsigned int M; ++ long unsigned int N; ++ long unsigned int OD; ++ long unsigned int POD; ++}; ++ ++struct hibmc_dislay_pll_config { ++ long unsigned int hdisplay; ++ long unsigned int vdisplay; ++ u32 pll1_config_value; ++ u32 pll2_config_value; ++}; ++ ++struct hibmc_resolution { ++ int w; ++ int h; ++}; ++ ++struct vga_device { ++ struct list_head list; ++ struct pci_dev *pdev; ++ unsigned int decodes; ++ unsigned int owns; ++ unsigned int locks; ++ unsigned int io_lock_cnt; ++ unsigned int mem_lock_cnt; ++ unsigned int io_norm_cnt; ++ unsigned int mem_norm_cnt; ++ bool bridge_has_one_vga; ++ void *cookie; ++ void (*irq_set_state)(void *, bool); ++ unsigned int (*set_vga_decode)(void *, bool); ++}; ++ ++struct vga_arb_user_card { ++ struct pci_dev *pdev; ++ unsigned int mem_cnt; ++ unsigned int io_cnt; ++}; ++ ++struct vga_arb_private { ++ struct list_head list; ++ struct pci_dev *target; ++ struct vga_arb_user_card cards[64]; ++ spinlock_t lock; ++}; ++ ++struct cb_id { ++ __u32 idx; ++ __u32 val; ++}; ++ ++struct cn_msg { ++ struct cb_id id; ++ __u32 seq; ++ __u32 ack; ++ __u16 len; ++ __u16 flags; ++ __u8 data[0]; ++}; ++ ++struct cn_queue_dev { ++ atomic_t refcnt; ++ unsigned char name[32]; ++ struct list_head queue_list; ++ spinlock_t queue_lock; ++ struct sock *nls; ++}; ++ ++struct cn_callback_id { ++ unsigned char name[32]; ++ struct cb_id id; ++}; ++ ++struct cn_callback_entry { ++ struct list_head callback_entry; ++ refcount_t refcnt; ++ struct cn_queue_dev *pdev; ++ struct cn_callback_id id; ++ void (*callback)(struct cn_msg *, struct netlink_skb_parms *); ++ u32 seq; ++ u32 group; ++}; ++ ++struct cn_dev { ++ struct cb_id id; ++ u32 seq; ++ u32 groups; ++ struct sock *nls; ++ void (*input)(struct sk_buff *); ++ struct cn_queue_dev *cbdev; ++}; ++ ++enum proc_cn_mcast_op { ++ PROC_CN_MCAST_LISTEN = 1, ++ PROC_CN_MCAST_IGNORE = 2, ++}; ++ ++struct fork_proc_event { ++ __kernel_pid_t parent_pid; ++ 
__kernel_pid_t parent_tgid; ++ __kernel_pid_t child_pid; ++ __kernel_pid_t child_tgid; ++}; ++ ++struct exec_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++}; ++ ++struct id_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++ union { ++ __u32 ruid; ++ __u32 rgid; ++ } r; ++ union { ++ __u32 euid; ++ __u32 egid; ++ } e; ++}; ++ ++struct sid_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++}; ++ ++struct ptrace_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++ __kernel_pid_t tracer_pid; ++ __kernel_pid_t tracer_tgid; ++}; ++ ++struct comm_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++ char comm[16]; ++}; ++ ++struct coredump_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++ __kernel_pid_t parent_pid; ++ __kernel_pid_t parent_tgid; ++}; ++ ++struct exit_proc_event { ++ __kernel_pid_t process_pid; ++ __kernel_pid_t process_tgid; ++ __u32 exit_code; ++ __u32 exit_signal; ++ __kernel_pid_t parent_pid; ++ __kernel_pid_t parent_tgid; ++}; ++ ++struct proc_event { ++ enum what what; ++ __u32 cpu; ++ __u64 timestamp_ns; ++ union { ++ struct { ++ __u32 err; ++ } ack; ++ struct fork_proc_event fork; ++ struct exec_proc_event exec; ++ struct id_proc_event id; ++ struct sid_proc_event sid; ++ struct ptrace_proc_event ptrace; ++ struct comm_proc_event comm; ++ struct coredump_proc_event coredump; ++ struct exit_proc_event exit; ++ } event_data; ++}; ++ ++struct component_ops { ++ int (*bind)(struct device *, struct device *, void *); ++ void (*unbind)(struct device *, struct device *, void *); ++}; ++ ++struct component; ++ ++struct component_match_array { ++ void *data; ++ int (*compare)(struct device *, void *); ++ void (*release)(struct device *, void *); ++ struct component *component; ++ bool duplicate; ++}; ++ ++struct master; ++ ++struct component { ++ struct list_head node; ++ struct master *master; ++ bool bound; ++ const struct component_ops *ops; ++ struct device *dev; ++}; ++ ++struct component_match___2 { ++ size_t alloc; ++ size_t num; ++ struct component_match_array *compare; ++}; ++ ++struct master { ++ struct list_head node; ++ bool bound; ++ const struct component_master_ops *ops; ++ struct device *dev; ++ struct component_match___2 *match; ++ struct dentry *dentry; ++}; ++ ++struct wake_irq { ++ struct device *dev; ++ unsigned int status; ++ int irq; ++ const char *name; ++}; ++ ++enum dpm_order { ++ DPM_ORDER_NONE = 0, ++ DPM_ORDER_DEV_AFTER_PARENT = 1, ++ DPM_ORDER_PARENT_BEFORE_DEV = 2, ++ DPM_ORDER_DEV_LAST = 3, ++}; ++ ++struct subsys_private { ++ struct kset subsys; ++ struct kset *devices_kset; ++ struct list_head interfaces; ++ struct mutex mutex; ++ struct kset *drivers_kset; ++ struct klist klist_devices; ++ struct klist klist_drivers; ++ struct blocking_notifier_head bus_notifier; ++ unsigned int drivers_autoprobe: 1; ++ struct bus_type *bus; ++ struct kset glue_dirs; ++ struct class *class; ++}; ++ ++struct driver_private { ++ struct kobject kobj; ++ struct klist klist_devices; ++ struct klist_node knode_bus; ++ struct module_kobject *mkobj; ++ struct device_driver *driver; ++}; ++ ++struct dev_ext_attribute { ++ struct device_attribute attr; ++ void *var; ++}; ++ ++enum device_link_state { ++ DL_STATE_NONE = 4294967295, ++ DL_STATE_DORMANT = 0, ++ DL_STATE_AVAILABLE = 1, ++ DL_STATE_CONSUMER_PROBE = 2, ++ DL_STATE_ACTIVE = 3, ++ DL_STATE_SUPPLIER_UNBIND = 4, ++}; ++ ++struct device_link { ++ struct 
device *supplier; ++ struct list_head s_node; ++ struct device *consumer; ++ struct list_head c_node; ++ enum device_link_state status; ++ u32 flags; ++ refcount_t rpm_active; ++ struct kref kref; ++ struct callback_head callback_head; ++ bool supplier_preactivated; ++}; ++ ++struct device_private { ++ struct klist klist_children; ++ struct klist_node knode_parent; ++ struct klist_node knode_driver; ++ struct klist_node knode_bus; ++ struct list_head deferred_probe; ++ struct device *device; ++ u8 dead: 1; ++}; ++ ++union device_attr_group_devres { ++ const struct attribute_group *group; ++ const struct attribute_group **groups; ++}; ++ ++struct class_dir { ++ struct kobject kobj; ++ struct class *class; ++}; ++ ++struct root_device { ++ struct device dev; ++ struct module *owner; ++}; ++ ++struct subsys_dev_iter { ++ struct klist_iter ki; ++ const struct device_type *type; ++}; ++ ++struct subsys_interface { ++ const char *name; ++ struct bus_type *subsys; ++ struct list_head node; ++ int (*add_dev)(struct device *, struct subsys_interface *); ++ void (*remove_dev)(struct device *, struct subsys_interface *); ++}; ++ ++struct device_attach_data { ++ struct device *dev; ++ bool check_async; ++ bool want_async; ++ bool have_async; ++}; ++ ++struct class_compat { ++ struct kobject *kobj; ++}; ++ ++struct early_platform_driver { ++ const char *class_str; ++ struct platform_driver *pdrv; ++ struct list_head list; ++ int requested_id; ++ char *buffer; ++ int bufsize; ++}; ++ ++struct platform_object { ++ struct platform_device pdev; ++ char name[0]; ++}; ++ ++struct cpu_attr { ++ struct device_attribute attr; ++ const struct cpumask * const map; ++}; ++ ++typedef struct kobject *kobj_probe_t(dev_t, int *, void *); ++ ++struct probe { ++ struct probe *next; ++ dev_t dev; ++ long unsigned int range; ++ struct module *owner; ++ kobj_probe_t *get; ++ int (*lock)(dev_t, void *); ++ void *data; ++}; ++ ++struct kobj_map___2 { ++ struct probe *probes[255]; ++ struct mutex *lock; ++}; ++ ++typedef int (*dr_match_t)(struct device *, void *, void *); ++ ++struct devres_node { ++ struct list_head entry; ++ dr_release_t release; ++}; ++ ++struct devres { ++ struct devres_node node; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ u8 data[0]; ++}; ++ ++struct devres_group { ++ struct devres_node node[2]; ++ void *id; ++ int color; ++}; ++ ++struct action_devres { ++ void *data; ++ void (*action)(void *); ++}; ++ ++struct pages_devres { ++ long unsigned int addr; ++ unsigned int order; ++}; ++ ++struct attribute_container { ++ struct list_head node; ++ struct klist containers; ++ struct class *class; ++ const struct attribute_group *grp; ++ struct device_attribute **attrs; ++ int (*match)(struct attribute_container *, struct device *); ++ long unsigned int flags; ++}; ++ ++struct internal_container { ++ struct klist_node node; ++ struct attribute_container *cont; ++ struct device classdev; ++}; ++ ++struct transport_container; ++ ++struct transport_class { ++ struct class class; ++ int (*setup)(struct transport_container *, struct device *, struct device *); ++ int (*configure)(struct transport_container *, struct device *, struct device *); ++ int (*remove)(struct transport_container *, struct device *, struct device *); ++}; ++ ++struct transport_container { ++ struct attribute_container ac; ++ const struct attribute_group *statistics; ++}; ++ ++struct anon_transport_class { ++ struct 
transport_class tclass; ++ struct attribute_container container; ++}; ++ ++struct mii_bus; ++ ++struct mdio_device { ++ struct device dev; ++ struct mii_bus *bus; ++ char modalias[32]; ++ int (*bus_match)(struct device *, struct device_driver *); ++ void (*device_free)(struct mdio_device *); ++ void (*device_remove)(struct mdio_device *); ++ int addr; ++ int flags; ++ struct gpio_desc___2 *reset; ++ unsigned int reset_assert_delay; ++ unsigned int reset_deassert_delay; ++}; ++ ++struct phy_c45_device_ids { ++ u32 devices_in_package; ++ u32 device_ids[8]; ++}; ++ ++enum phy_state { ++ PHY_DOWN = 0, ++ PHY_STARTING = 1, ++ PHY_READY = 2, ++ PHY_PENDING = 3, ++ PHY_UP = 4, ++ PHY_AN = 5, ++ PHY_RUNNING = 6, ++ PHY_NOLINK = 7, ++ PHY_FORCING = 8, ++ PHY_CHANGELINK = 9, ++ PHY_HALTED = 10, ++ PHY_RESUMING = 11, ++}; ++ ++typedef enum { ++ PHY_INTERFACE_MODE_NA = 0, ++ PHY_INTERFACE_MODE_INTERNAL = 1, ++ PHY_INTERFACE_MODE_MII = 2, ++ PHY_INTERFACE_MODE_GMII = 3, ++ PHY_INTERFACE_MODE_SGMII = 4, ++ PHY_INTERFACE_MODE_TBI = 5, ++ PHY_INTERFACE_MODE_REVMII = 6, ++ PHY_INTERFACE_MODE_RMII = 7, ++ PHY_INTERFACE_MODE_RGMII = 8, ++ PHY_INTERFACE_MODE_RGMII_ID = 9, ++ PHY_INTERFACE_MODE_RGMII_RXID = 10, ++ PHY_INTERFACE_MODE_RGMII_TXID = 11, ++ PHY_INTERFACE_MODE_RTBI = 12, ++ PHY_INTERFACE_MODE_SMII = 13, ++ PHY_INTERFACE_MODE_XGMII = 14, ++ PHY_INTERFACE_MODE_MOCA = 15, ++ PHY_INTERFACE_MODE_QSGMII = 16, ++ PHY_INTERFACE_MODE_TRGMII = 17, ++ PHY_INTERFACE_MODE_1000BASEX = 18, ++ PHY_INTERFACE_MODE_2500BASEX = 19, ++ PHY_INTERFACE_MODE_RXAUI = 20, ++ PHY_INTERFACE_MODE_XAUI = 21, ++ PHY_INTERFACE_MODE_10GKR = 22, ++ PHY_INTERFACE_MODE_MAX = 23, ++} phy_interface_t; ++ ++struct phylink; ++ ++struct phy_driver; ++ ++struct phy_device { ++ struct mdio_device mdio; ++ struct phy_driver *drv; ++ u32 phy_id; ++ struct phy_c45_device_ids c45_ids; ++ unsigned int is_c45: 1; ++ unsigned int is_internal: 1; ++ unsigned int is_pseudo_fixed_link: 1; ++ unsigned int has_fixups: 1; ++ unsigned int suspended: 1; ++ unsigned int sysfs_links: 1; ++ unsigned int loopback_enabled: 1; ++ unsigned int autoneg: 1; ++ unsigned int link: 1; ++ enum phy_state state; ++ u32 dev_flags; ++ phy_interface_t interface; ++ int speed; ++ int duplex; ++ int pause; ++ int asym_pause; ++ u32 interrupts; ++ u32 supported; ++ u32 advertising; ++ u32 lp_advertising; ++ u32 eee_broken_modes; ++ int link_timeout; ++ int irq; ++ void *priv; ++ struct work_struct phy_queue; ++ struct delayed_work state_queue; ++ struct mutex lock; ++ struct phylink *phylink; ++ struct net_device *attached_dev; ++ u8 mdix; ++ u8 mdix_ctrl; ++ void (*phy_link_change)(struct phy_device *, bool, bool); ++ void (*adjust_link)(struct net_device *); ++}; ++ ++struct mii_bus { ++ struct module *owner; ++ const char *name; ++ char id[61]; ++ void *priv; ++ int (*read)(struct mii_bus *, int, int); ++ int (*write)(struct mii_bus *, int, int, u16); ++ int (*reset)(struct mii_bus *); ++ struct mutex mdio_lock; ++ struct device *parent; ++ enum { ++ MDIOBUS_ALLOCATED = 1, ++ MDIOBUS_REGISTERED = 2, ++ MDIOBUS_UNREGISTERED = 3, ++ MDIOBUS_RELEASED = 4, ++ } state; ++ struct device dev; ++ struct mdio_device *mdio_map[32]; ++ u32 phy_mask; ++ u32 phy_ignore_ta_mask; ++ int irq[32]; ++ int reset_delay_us; ++ struct gpio_desc___2 *reset_gpiod; ++}; ++ ++struct mdio_driver_common { ++ struct device_driver driver; ++ int flags; ++}; ++ ++struct phy_driver { ++ struct mdio_driver_common mdiodrv; ++ u32 phy_id; ++ char *name; ++ u32 phy_id_mask; ++ u32 features; ++ u32 flags; ++ 
const void *driver_data; ++ int (*soft_reset)(struct phy_device *); ++ int (*config_init)(struct phy_device *); ++ int (*probe)(struct phy_device *); ++ int (*suspend)(struct phy_device *); ++ int (*resume)(struct phy_device *); ++ int (*config_aneg)(struct phy_device *); ++ int (*aneg_done)(struct phy_device *); ++ int (*read_status)(struct phy_device *); ++ int (*ack_interrupt)(struct phy_device *); ++ int (*config_intr)(struct phy_device *); ++ int (*did_interrupt)(struct phy_device *); ++ void (*remove)(struct phy_device *); ++ int (*match_phy_device)(struct phy_device *); ++ int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); ++ int (*hwtstamp)(struct phy_device *, struct ifreq *); ++ bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); ++ void (*txtstamp)(struct phy_device *, struct sk_buff *, int); ++ int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); ++ void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); ++ void (*link_change_notify)(struct phy_device *); ++ int (*read_mmd)(struct phy_device *, int, u16); ++ int (*write_mmd)(struct phy_device *, int, u16, u16); ++ int (*read_page)(struct phy_device *); ++ int (*write_page)(struct phy_device *, int); ++ int (*module_info)(struct phy_device *, struct ethtool_modinfo *); ++ int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_sset_count)(struct phy_device *); ++ void (*get_strings)(struct phy_device *, u8 *); ++ void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); ++ int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); ++ int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); ++ int (*set_loopback)(struct phy_device *, bool); ++}; ++ ++struct property_set { ++ struct device *dev; ++ struct fwnode_handle fwnode; ++ const struct property_entry *properties; ++}; ++ ++struct cache_type_info { ++ const char *size_prop; ++ const char *line_size_props[2]; ++ const char *nr_sets_prop; ++}; ++ ++struct device_connection { ++ const char *endpoint[2]; ++ const char *id; ++ struct list_head list; ++}; ++ ++struct req { ++ struct req *next; ++ struct completion done; ++ int err; ++ const char *name; ++ umode_t mode; ++ kuid_t uid; ++ kgid_t gid; ++ struct device *dev; ++}; ++ ++typedef int (*pm_callback_t)(struct device *); ++ ++enum gpd_status { ++ GPD_STATE_ACTIVE = 0, ++ GPD_STATE_POWER_OFF = 1, ++}; ++ ++struct gpd_dev_ops { ++ int (*start)(struct device *); ++ int (*stop)(struct device *); ++}; ++ ++struct genpd_power_state { ++ s64 power_off_latency_ns; ++ s64 power_on_latency_ns; ++ s64 residency_ns; ++ struct fwnode_handle *fwnode; ++ ktime_t idle_time; ++}; ++ ++struct dev_pm_opp; ++ ++struct genpd_lock_ops; ++ ++struct generic_pm_domain { ++ struct device dev; ++ struct dev_pm_domain domain; ++ struct list_head gpd_list_node; ++ struct list_head master_links; ++ struct list_head slave_links; ++ struct list_head dev_list; ++ struct dev_power_governor *gov; ++ struct work_struct power_off_work; ++ struct fwnode_handle *provider; ++ bool has_provider; ++ const char *name; ++ atomic_t sd_count; ++ enum gpd_status status; ++ unsigned int device_count; ++ unsigned int suspended_count; ++ unsigned int prepared_count; ++ unsigned int performance_state; ++ int (*power_off)(struct generic_pm_domain *); ++ int (*power_on)(struct generic_pm_domain *); ++ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, struct dev_pm_opp *); ++ int (*set_performance_state)(struct generic_pm_domain *, 
unsigned int); ++ struct gpd_dev_ops dev_ops; ++ s64 max_off_time_ns; ++ bool max_off_time_changed; ++ bool cached_power_down_ok; ++ int (*attach_dev)(struct generic_pm_domain *, struct device *); ++ void (*detach_dev)(struct generic_pm_domain *, struct device *); ++ unsigned int flags; ++ struct genpd_power_state *states; ++ unsigned int state_count; ++ unsigned int state_idx; ++ void *free; ++ ktime_t on_time; ++ ktime_t accounting_time; ++ const struct genpd_lock_ops *lock_ops; ++ union { ++ struct mutex mlock; ++ struct { ++ spinlock_t slock; ++ long unsigned int lock_flags; ++ }; ++ }; ++}; ++ ++struct genpd_lock_ops { ++ void (*lock)(struct generic_pm_domain *); ++ void (*lock_nested)(struct generic_pm_domain *, int); ++ int (*lock_interruptible)(struct generic_pm_domain *); ++ void (*unlock)(struct generic_pm_domain *); ++}; ++ ++struct gpd_link { ++ struct generic_pm_domain *master; ++ struct list_head master_node; ++ struct generic_pm_domain *slave; ++ struct list_head slave_node; ++}; ++ ++struct gpd_timing_data { ++ s64 suspend_latency_ns; ++ s64 resume_latency_ns; ++ s64 effective_constraint_ns; ++ bool constraint_changed; ++ bool cached_suspend_ok; ++}; ++ ++struct generic_pm_domain_data { ++ struct pm_domain_data base; ++ struct gpd_timing_data td; ++ struct notifier_block nb; ++ unsigned int performance_state; ++ void *data; ++}; ++ ++typedef struct generic_pm_domain * (*genpd_xlate_t)(struct of_phandle_args *, void *); ++ ++struct genpd_onecell_data { ++ struct generic_pm_domain **domains; ++ unsigned int num_domains; ++ genpd_xlate_t xlate; ++}; ++ ++struct of_genpd_provider { ++ struct list_head link; ++ struct device_node *node; ++ genpd_xlate_t xlate; ++ void *data; ++}; ++ ++struct pm_clk_notifier_block { ++ struct notifier_block nb; ++ struct dev_pm_domain *pm_domain; ++ char *con_ids[0]; ++}; ++ ++enum pce_status { ++ PCE_STATUS_NONE = 0, ++ PCE_STATUS_ACQUIRED = 1, ++ PCE_STATUS_ENABLED = 2, ++ PCE_STATUS_ERROR = 3, ++}; ++ ++struct pm_clock_entry { ++ struct list_head node; ++ char *con_id; ++ struct clk *clk; ++ enum pce_status status; ++}; ++ ++struct builtin_fw { ++ char *name; ++ void *data; ++ long unsigned int size; ++}; ++ ++enum fw_opt { ++ FW_OPT_UEVENT = 1, ++ FW_OPT_NOWAIT = 2, ++ FW_OPT_USERHELPER = 4, ++ FW_OPT_NO_WARN = 8, ++ FW_OPT_NOCACHE = 16, ++ FW_OPT_NOFALLBACK = 32, ++}; ++ ++enum fw_status { ++ FW_STATUS_UNKNOWN = 0, ++ FW_STATUS_LOADING = 1, ++ FW_STATUS_DONE = 2, ++ FW_STATUS_ABORTED = 3, ++}; ++ ++struct fw_state { ++ struct completion completion; ++ enum fw_status status; ++}; ++ ++struct firmware_cache; ++ ++struct fw_priv { ++ struct kref ref; ++ struct list_head list; ++ struct firmware_cache *fwc; ++ struct fw_state fw_st; ++ void *data; ++ size_t size; ++ size_t allocated_size; ++ const char *fw_name; ++}; ++ ++struct firmware_cache { ++ spinlock_t lock; ++ struct list_head head; ++ int state; ++ spinlock_t name_lock; ++ struct list_head fw_names; ++ struct delayed_work work; ++ struct notifier_block pm_notify; ++}; ++ ++struct fw_cache_entry { ++ struct list_head list; ++ const char *name; ++}; ++ ++struct fw_name_devm { ++ long unsigned int magic; ++ const char *name; ++}; ++ ++struct firmware_work { ++ struct work_struct work; ++ struct module *module; ++ const char *name; ++ struct device *device; ++ void *context; ++ void (*cont)(const struct firmware *, void *); ++ enum fw_opt opt_flags; ++}; ++ ++typedef void (*node_registration_func_t)(struct node *); ++ ++typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void 
*); ++ ++struct node_attr { ++ struct device_attribute attr; ++ enum node_states state; ++}; ++ ++struct for_each_memory_block_cb_data { ++ walk_memory_blocks_func_t func; ++ void *arg; ++}; ++ ++enum regcache_type { ++ REGCACHE_NONE = 0, ++ REGCACHE_RBTREE = 1, ++ REGCACHE_COMPRESSED = 2, ++ REGCACHE_FLAT = 3, ++}; ++ ++struct reg_default { ++ unsigned int reg; ++ unsigned int def; ++}; ++ ++struct reg_sequence { ++ unsigned int reg; ++ unsigned int def; ++ unsigned int delay_us; ++}; ++ ++enum regmap_endian { ++ REGMAP_ENDIAN_DEFAULT = 0, ++ REGMAP_ENDIAN_BIG = 1, ++ REGMAP_ENDIAN_LITTLE = 2, ++ REGMAP_ENDIAN_NATIVE = 3, ++}; ++ ++struct regmap_range { ++ unsigned int range_min; ++ unsigned int range_max; ++}; ++ ++struct regmap_access_table { ++ const struct regmap_range *yes_ranges; ++ unsigned int n_yes_ranges; ++ const struct regmap_range *no_ranges; ++ unsigned int n_no_ranges; ++}; ++ ++typedef void (*regmap_lock)(void *); ++ ++typedef void (*regmap_unlock)(void *); ++ ++struct regmap_range_cfg; ++ ++struct regmap_config { ++ const char *name; ++ int reg_bits; ++ int reg_stride; ++ int pad_bits; ++ int val_bits; ++ bool (*writeable_reg)(struct device *, unsigned int); ++ bool (*readable_reg)(struct device *, unsigned int); ++ bool (*volatile_reg)(struct device *, unsigned int); ++ bool (*precious_reg)(struct device *, unsigned int); ++ bool (*readable_noinc_reg)(struct device *, unsigned int); ++ bool disable_locking; ++ regmap_lock lock; ++ regmap_unlock unlock; ++ void *lock_arg; ++ int (*reg_read)(void *, unsigned int, unsigned int *); ++ int (*reg_write)(void *, unsigned int, unsigned int); ++ bool fast_io; ++ unsigned int max_register; ++ const struct regmap_access_table *wr_table; ++ const struct regmap_access_table *rd_table; ++ const struct regmap_access_table *volatile_table; ++ const struct regmap_access_table *precious_table; ++ const struct regmap_access_table *rd_noinc_table; ++ const struct reg_default *reg_defaults; ++ unsigned int num_reg_defaults; ++ enum regcache_type cache_type; ++ const void *reg_defaults_raw; ++ unsigned int num_reg_defaults_raw; ++ long unsigned int read_flag_mask; ++ long unsigned int write_flag_mask; ++ bool zero_flag_mask; ++ bool use_single_rw; ++ bool can_multi_write; ++ enum regmap_endian reg_format_endian; ++ enum regmap_endian val_format_endian; ++ const struct regmap_range_cfg *ranges; ++ unsigned int num_ranges; ++ bool use_hwlock; ++ unsigned int hwlock_id; ++ unsigned int hwlock_mode; ++}; ++ ++struct regmap_range_cfg { ++ const char *name; ++ unsigned int range_min; ++ unsigned int range_max; ++ unsigned int selector_reg; ++ unsigned int selector_mask; ++ int selector_shift; ++ unsigned int window_start; ++ unsigned int window_len; ++}; ++ ++typedef int (*regmap_hw_write)(void *, const void *, size_t); ++ ++typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t); ++ ++struct regmap_async; ++ ++typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, struct regmap_async *); ++ ++struct regmap___2; ++ ++struct regmap_async { ++ struct list_head list; ++ struct regmap___2 *map; ++ void *work_buf; ++}; ++ ++typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t); ++ ++typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *); ++ ++typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int); ++ ++typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); ++ ++typedef struct regmap_async * 
(*regmap_hw_async_alloc)(); ++ ++typedef void (*regmap_hw_free_context)(void *); ++ ++struct regmap_bus { ++ bool fast_io; ++ regmap_hw_write write; ++ regmap_hw_gather_write gather_write; ++ regmap_hw_async_write async_write; ++ regmap_hw_reg_write reg_write; ++ regmap_hw_reg_update_bits reg_update_bits; ++ regmap_hw_read read; ++ regmap_hw_reg_read reg_read; ++ regmap_hw_free_context free_context; ++ regmap_hw_async_alloc async_alloc; ++ u8 read_flag_mask; ++ enum regmap_endian reg_format_endian_default; ++ enum regmap_endian val_format_endian_default; ++ size_t max_raw_read; ++ size_t max_raw_write; ++}; ++ ++struct reg_field { ++ unsigned int reg; ++ unsigned int lsb; ++ unsigned int msb; ++ unsigned int id_size; ++ unsigned int id_offset; ++}; ++ ++struct regmap_format { ++ size_t buf_size; ++ size_t reg_bytes; ++ size_t pad_bytes; ++ size_t val_bytes; ++ void (*format_write)(struct regmap___2 *, unsigned int, unsigned int); ++ void (*format_reg)(void *, unsigned int, unsigned int); ++ void (*format_val)(void *, unsigned int, unsigned int); ++ unsigned int (*parse_val)(const void *); ++ void (*parse_inplace)(void *); ++}; ++ ++struct hwspinlock; ++ ++struct regcache_ops; ++ ++struct regmap___2 { ++ union { ++ struct mutex mutex; ++ struct { ++ spinlock_t spinlock; ++ long unsigned int spinlock_flags; ++ }; ++ }; ++ regmap_lock lock; ++ regmap_unlock unlock; ++ void *lock_arg; ++ gfp_t alloc_flags; ++ struct device *dev; ++ void *work_buf; ++ struct regmap_format format; ++ const struct regmap_bus *bus; ++ void *bus_context; ++ const char *name; ++ bool async; ++ spinlock_t async_lock; ++ wait_queue_head_t async_waitq; ++ struct list_head async_list; ++ struct list_head async_free; ++ int async_ret; ++ bool debugfs_disable; ++ struct dentry *debugfs; ++ const char *debugfs_name; ++ unsigned int debugfs_reg_len; ++ unsigned int debugfs_val_len; ++ unsigned int debugfs_tot_len; ++ struct list_head debugfs_off_cache; ++ struct mutex cache_lock; ++ unsigned int max_register; ++ bool (*writeable_reg)(struct device *, unsigned int); ++ bool (*readable_reg)(struct device *, unsigned int); ++ bool (*volatile_reg)(struct device *, unsigned int); ++ bool (*precious_reg)(struct device *, unsigned int); ++ bool (*readable_noinc_reg)(struct device *, unsigned int); ++ const struct regmap_access_table *wr_table; ++ const struct regmap_access_table *rd_table; ++ const struct regmap_access_table *volatile_table; ++ const struct regmap_access_table *precious_table; ++ const struct regmap_access_table *rd_noinc_table; ++ int (*reg_read)(void *, unsigned int, unsigned int *); ++ int (*reg_write)(void *, unsigned int, unsigned int); ++ int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); ++ bool defer_caching; ++ long unsigned int read_flag_mask; ++ long unsigned int write_flag_mask; ++ int reg_shift; ++ int reg_stride; ++ int reg_stride_order; ++ const struct regcache_ops *cache_ops; ++ enum regcache_type cache_type; ++ unsigned int cache_size_raw; ++ unsigned int cache_word_size; ++ unsigned int num_reg_defaults; ++ unsigned int num_reg_defaults_raw; ++ bool cache_only; ++ bool cache_bypass; ++ bool cache_free; ++ struct reg_default *reg_defaults; ++ const void *reg_defaults_raw; ++ void *cache; ++ bool cache_dirty; ++ bool no_sync_defaults; ++ struct reg_sequence *patch; ++ int patch_regs; ++ bool use_single_read; ++ bool use_single_write; ++ bool can_multi_write; ++ size_t max_raw_read; ++ size_t max_raw_write; ++ struct rb_root range_tree; ++ void *selector_work_buf; ++ struct 
hwspinlock *hwlock; ++}; ++ ++struct regcache_ops { ++ const char *name; ++ enum regcache_type type; ++ int (*init)(struct regmap___2 *); ++ int (*exit)(struct regmap___2 *); ++ void (*debugfs_init)(struct regmap___2 *); ++ int (*read)(struct regmap___2 *, unsigned int, unsigned int *); ++ int (*write)(struct regmap___2 *, unsigned int, unsigned int); ++ int (*sync)(struct regmap___2 *, unsigned int, unsigned int); ++ int (*drop)(struct regmap___2 *, unsigned int, unsigned int); ++}; ++ ++struct regmap_range_node { ++ struct rb_node node; ++ const char *name; ++ struct regmap___2 *map; ++ unsigned int range_min; ++ unsigned int range_max; ++ unsigned int selector_reg; ++ unsigned int selector_mask; ++ int selector_shift; ++ unsigned int window_start; ++ unsigned int window_len; ++}; ++ ++struct regmap_field { ++ struct regmap___2 *regmap; ++ unsigned int mask; ++ unsigned int shift; ++ unsigned int reg; ++ unsigned int id_size; ++ unsigned int id_offset; ++}; ++ ++struct trace_event_raw_regmap_reg { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ unsigned int reg; ++ unsigned int val; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_regmap_block { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ unsigned int reg; ++ int count; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_regcache_sync { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u32 __data_loc_status; ++ u32 __data_loc_type; ++ int type; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_regmap_bool { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ int flag; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_regmap_async { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_regcache_drop_region { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ unsigned int from; ++ unsigned int to; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_regmap_reg { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_regmap_block { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_regcache_sync { ++ u32 name; ++ u32 status; ++ u32 type; ++}; ++ ++struct trace_event_data_offsets_regmap_bool { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_regmap_async { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_regcache_drop_region { ++ u32 name; ++}; ++ ++struct regcache_rbtree_node { ++ void *block; ++ long int *cache_present; ++ unsigned int base_reg; ++ unsigned int blklen; ++ struct rb_node node; ++}; ++ ++struct regcache_rbtree_ctx { ++ struct rb_root root; ++ struct regcache_rbtree_node *cached_rbnode; ++}; ++ ++struct regmap_debugfs_off_cache { ++ struct list_head list; ++ off_t min; ++ off_t max; ++ unsigned int base_reg; ++ unsigned int max_reg; ++}; ++ ++struct regmap_debugfs_node { ++ struct regmap___2 *map; ++ const char *name; ++ struct list_head link; ++}; ++ ++struct spi_statistics { ++ spinlock_t lock; ++ long unsigned int messages; ++ long unsigned int transfers; ++ long unsigned int errors; ++ long unsigned int timedout; ++ long unsigned int spi_sync; ++ long unsigned int spi_sync_immediate; ++ long unsigned int spi_async; ++ long long unsigned int bytes; ++ long long unsigned int bytes_rx; ++ long long unsigned int bytes_tx; ++ long unsigned int transfer_bytes_histo[17]; ++ long unsigned int transfers_split_maxsize; ++}; ++ ++struct spi_controller; ++ ++struct spi_device { ++ struct device dev; ++ struct spi_controller *controller; ++ struct spi_controller *master; ++ u32 max_speed_hz; ++ u8 chip_select; ++ u8 bits_per_word; 
++ u16 mode; ++ int irq; ++ void *controller_state; ++ void *controller_data; ++ char modalias[32]; ++ int cs_gpio; ++ struct spi_statistics statistics; ++}; ++ ++struct spi_message; ++ ++struct spi_transfer; ++ ++struct spi_controller_mem_ops; ++ ++struct spi_controller { ++ struct device dev; ++ struct list_head list; ++ s16 bus_num; ++ u16 num_chipselect; ++ u16 dma_alignment; ++ u16 mode_bits; ++ u32 bits_per_word_mask; ++ u32 min_speed_hz; ++ u32 max_speed_hz; ++ u16 flags; ++ bool slave; ++ size_t (*max_transfer_size)(struct spi_device *); ++ size_t (*max_message_size)(struct spi_device *); ++ struct mutex io_mutex; ++ spinlock_t bus_lock_spinlock; ++ struct mutex bus_lock_mutex; ++ bool bus_lock_flag; ++ int (*setup)(struct spi_device *); ++ int (*transfer)(struct spi_device *, struct spi_message *); ++ void (*cleanup)(struct spi_device *); ++ bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *); ++ bool queued; ++ struct kthread_worker kworker; ++ struct task_struct *kworker_task; ++ struct kthread_work pump_messages; ++ spinlock_t queue_lock; ++ struct list_head queue; ++ struct spi_message *cur_msg; ++ bool idling; ++ bool busy; ++ bool running; ++ bool rt; ++ bool auto_runtime_pm; ++ bool cur_msg_prepared; ++ bool cur_msg_mapped; ++ struct completion xfer_completion; ++ size_t max_dma_len; ++ int (*prepare_transfer_hardware)(struct spi_controller *); ++ int (*transfer_one_message)(struct spi_controller *, struct spi_message *); ++ int (*unprepare_transfer_hardware)(struct spi_controller *); ++ int (*prepare_message)(struct spi_controller *, struct spi_message *); ++ int (*unprepare_message)(struct spi_controller *, struct spi_message *); ++ int (*slave_abort)(struct spi_controller *); ++ void (*set_cs)(struct spi_device *, bool); ++ int (*transfer_one)(struct spi_controller *, struct spi_device *, struct spi_transfer *); ++ void (*handle_err)(struct spi_controller *, struct spi_message *); ++ const struct spi_controller_mem_ops *mem_ops; ++ int *cs_gpios; ++ struct spi_statistics statistics; ++ struct dma_chan *dma_tx; ++ struct dma_chan *dma_rx; ++ void *dummy_rx; ++ void *dummy_tx; ++ int (*fw_translate_cs)(struct spi_controller *, unsigned int); ++}; ++ ++struct spi_message { ++ struct list_head transfers; ++ struct spi_device *spi; ++ unsigned int is_dma_mapped: 1; ++ void (*complete)(void *); ++ void *context; ++ unsigned int frame_length; ++ unsigned int actual_length; ++ int status; ++ struct list_head queue; ++ void *state; ++ struct list_head resources; ++}; ++ ++struct spi_transfer { ++ const void *tx_buf; ++ void *rx_buf; ++ unsigned int len; ++ dma_addr_t tx_dma; ++ dma_addr_t rx_dma; ++ struct sg_table tx_sg; ++ struct sg_table rx_sg; ++ unsigned int cs_change: 1; ++ unsigned int tx_nbits: 3; ++ unsigned int rx_nbits: 3; ++ u8 bits_per_word; ++ u16 delay_usecs; ++ u32 speed_hz; ++ struct list_head transfer_list; ++}; ++ ++struct spi_mem; ++ ++struct spi_mem_op; ++ ++struct spi_controller_mem_ops { ++ int (*adjust_op_size)(struct spi_mem *, struct spi_mem_op *); ++ bool (*supports_op)(struct spi_mem *, const struct spi_mem_op *); ++ int (*exec_op)(struct spi_mem *, const struct spi_mem_op *); ++ const char * (*get_name)(struct spi_mem *); ++}; ++ ++struct regmap_async_spi { ++ struct regmap_async core; ++ struct spi_message m; ++ struct spi_transfer t[2]; ++}; ++ ++struct regmap_mmio_context { ++ void *regs; ++ unsigned int val_bytes; ++ bool attached_clk; ++ struct clk *clk; ++ void (*reg_write)(struct regmap_mmio_context *, unsigned 
int, unsigned int); ++ unsigned int (*reg_read)(struct regmap_mmio_context *, unsigned int); ++}; ++ ++typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); ++ ++struct platform_msi_priv_data { ++ struct device *dev; ++ void *host_data; ++ msi_alloc_info_t arg; ++ irq_write_msi_msg_t write_msg; ++ int devid; ++}; ++ ++struct test_struct { ++ char *get; ++ char *put; ++ void (*get_handler)(char *); ++ int (*put_handler)(char *, char *); ++}; ++ ++struct test_state { ++ char *name; ++ struct test_struct *tst; ++ int idx; ++ int (*run_test)(int, int); ++ int (*validate_put)(char *); ++}; ++ ++struct vexpress_syscfg { ++ struct device *dev; ++ void *base; ++ struct list_head funcs; ++}; ++ ++struct vexpress_syscfg_func { ++ struct list_head list; ++ struct vexpress_syscfg *syscfg; ++ struct regmap *regmap; ++ int num_templates; ++ u32 template[0]; ++}; ++ ++struct syscon_platform_data { ++ const char *label; ++}; ++ ++struct syscon { ++ struct device_node *np; ++ struct regmap *regmap; ++ struct list_head list; ++}; ++ ++struct dax_device___2; ++ ++struct dax_operations { ++ long int (*direct_access)(struct dax_device___2 *, long unsigned int, long int, void **, pfn_t *); ++ size_t (*copy_from_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *); ++ size_t (*copy_to_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *); ++}; ++ ++struct dax_device___2 { ++ struct hlist_node list; ++ struct inode inode; ++ struct cdev cdev; ++ const char *host; ++ void *private; ++ long unsigned int flags; ++ const struct dax_operations *ops; ++}; ++ ++enum dax_device_flags { ++ DAXDEV_ALIVE = 0, ++ DAXDEV_WRITE_CACHE = 1, ++}; ++ ++struct dma_buf_sync { ++ __u64 flags; ++}; ++ ++struct dma_buf_list { ++ struct list_head head; ++ struct mutex lock; ++}; ++ ++struct trace_event_raw_dma_fence { ++ struct trace_entry ent; ++ u32 __data_loc_driver; ++ u32 __data_loc_timeline; ++ unsigned int context; ++ unsigned int seqno; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_dma_fence { ++ u32 driver; ++ u32 timeline; ++}; ++ ++struct default_wait_cb { ++ struct dma_fence_cb base; ++ struct task_struct *task; ++}; ++ ++struct dma_fence_array; ++ ++struct dma_fence_array_cb { ++ struct dma_fence_cb cb; ++ struct dma_fence_array *array; ++}; ++ ++struct dma_fence_array { ++ struct dma_fence base; ++ spinlock_t lock; ++ unsigned int num_fences; ++ atomic_t num_pending; ++ struct dma_fence **fences; ++ struct irq_work work; ++}; ++ ++enum seqno_fence_condition { ++ SEQNO_FENCE_WAIT_GEQUAL = 0, ++ SEQNO_FENCE_WAIT_NONZERO = 1, ++}; ++ ++struct seqno_fence { ++ struct dma_fence base; ++ const struct dma_fence_ops *ops; ++ struct dma_buf *sync_buf; ++ uint32_t seqno_ofs; ++ enum seqno_fence_condition condition; ++}; ++ ++struct sync_merge_data { ++ char name[32]; ++ __s32 fd2; ++ __s32 fence; ++ __u32 flags; ++ __u32 pad; ++}; ++ ++struct sync_fence_info { ++ char obj_name[32]; ++ char driver_name[32]; ++ __s32 status; ++ __u32 flags; ++ __u64 timestamp_ns; ++}; ++ ++struct sync_file_info { ++ char name[32]; ++ __s32 status; ++ __u32 flags; ++ __u32 num_fences; ++ __u32 pad; ++ __u64 sync_fence_info; ++}; ++ ++struct scsi_sense_hdr { ++ u8 response_code; ++ u8 sense_key; ++ u8 asc; ++ u8 ascq; ++ u8 byte4; ++ u8 byte5; ++ u8 byte6; ++ u8 additional_length; ++}; ++ ++typedef __u64 blist_flags_t; ++ ++enum scsi_device_state { ++ SDEV_CREATED = 1, ++ SDEV_RUNNING = 2, ++ SDEV_CANCEL = 3, ++ SDEV_DEL = 4, ++ SDEV_QUIESCE = 5, ++ 
SDEV_OFFLINE = 6, ++ SDEV_TRANSPORT_OFFLINE = 7, ++ SDEV_BLOCK = 8, ++ SDEV_CREATED_BLOCK = 9, ++}; ++ ++struct scsi_vpd { ++ struct callback_head rcu; ++ int len; ++ unsigned char data[0]; ++}; ++ ++struct Scsi_Host; ++ ++struct scsi_target; ++ ++struct scsi_device_handler; ++ ++struct scsi_device { ++ struct Scsi_Host *host; ++ struct request_queue *request_queue; ++ struct list_head siblings; ++ struct list_head same_target_siblings; ++ atomic_t device_busy; ++ atomic_t device_blocked; ++ spinlock_t list_lock; ++ struct list_head cmd_list; ++ struct list_head starved_entry; ++ short unsigned int queue_depth; ++ short unsigned int max_queue_depth; ++ short unsigned int last_queue_full_depth; ++ short unsigned int last_queue_full_count; ++ long unsigned int last_queue_full_time; ++ long unsigned int queue_ramp_up_period; ++ long unsigned int last_queue_ramp_up; ++ unsigned int id; ++ unsigned int channel; ++ u64 lun; ++ unsigned int manufacturer; ++ unsigned int sector_size; ++ void *hostdata; ++ unsigned char type; ++ char scsi_level; ++ char inq_periph_qual; ++ struct mutex inquiry_mutex; ++ unsigned char inquiry_len; ++ unsigned char *inquiry; ++ const char *vendor; ++ const char *model; ++ const char *rev; ++ struct scsi_vpd *vpd_pg83; ++ struct scsi_vpd *vpd_pg80; ++ unsigned char current_tag; ++ struct scsi_target *sdev_target; ++ blist_flags_t sdev_bflags; ++ unsigned int eh_timeout; ++ unsigned int removable: 1; ++ unsigned int changed: 1; ++ unsigned int busy: 1; ++ unsigned int lockable: 1; ++ unsigned int locked: 1; ++ unsigned int borken: 1; ++ unsigned int disconnect: 1; ++ unsigned int soft_reset: 1; ++ unsigned int sdtr: 1; ++ unsigned int wdtr: 1; ++ unsigned int ppr: 1; ++ unsigned int tagged_supported: 1; ++ unsigned int simple_tags: 1; ++ unsigned int was_reset: 1; ++ unsigned int expecting_cc_ua: 1; ++ unsigned int use_10_for_rw: 1; ++ unsigned int use_10_for_ms: 1; ++ unsigned int no_report_opcodes: 1; ++ unsigned int no_write_same: 1; ++ unsigned int use_16_for_rw: 1; ++ unsigned int skip_ms_page_8: 1; ++ unsigned int skip_ms_page_3f: 1; ++ unsigned int skip_vpd_pages: 1; ++ unsigned int try_vpd_pages: 1; ++ unsigned int use_192_bytes_for_3f: 1; ++ unsigned int no_start_on_add: 1; ++ unsigned int allow_restart: 1; ++ unsigned int manage_start_stop: 1; ++ unsigned int start_stop_pwr_cond: 1; ++ unsigned int no_uld_attach: 1; ++ unsigned int select_no_atn: 1; ++ unsigned int fix_capacity: 1; ++ unsigned int guess_capacity: 1; ++ unsigned int retry_hwerror: 1; ++ unsigned int last_sector_bug: 1; ++ unsigned int no_read_disc_info: 1; ++ unsigned int no_read_capacity_16: 1; ++ unsigned int try_rc_10_first: 1; ++ unsigned int security_supported: 1; ++ unsigned int is_visible: 1; ++ unsigned int wce_default_on: 1; ++ unsigned int no_dif: 1; ++ unsigned int broken_fua: 1; ++ unsigned int lun_in_cdb: 1; ++ unsigned int unmap_limit_for_ws: 1; ++ atomic_t disk_events_disable_depth; ++ long unsigned int supported_events[1]; ++ long unsigned int pending_events[1]; ++ struct list_head event_list; ++ struct work_struct event_work; ++ unsigned int max_device_blocked; ++ atomic_t iorequest_cnt; ++ atomic_t iodone_cnt; ++ atomic_t ioerr_cnt; ++ struct device sdev_gendev; ++ struct device sdev_dev; ++ struct execute_work ew; ++ struct work_struct requeue_work; ++ struct scsi_device_handler *handler; ++ void *handler_data; ++ unsigned char access_state; ++ struct mutex state_mutex; ++ enum scsi_device_state sdev_state; ++ struct task_struct *quiesced_by; ++ long unsigned int 
offline_already; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int sdev_data[0]; ++}; ++ ++enum scsi_host_state { ++ SHOST_CREATED = 1, ++ SHOST_RUNNING = 2, ++ SHOST_CANCEL = 3, ++ SHOST_DEL = 4, ++ SHOST_RECOVERY = 5, ++ SHOST_CANCEL_RECOVERY = 6, ++ SHOST_DEL_RECOVERY = 7, ++}; ++ ++struct scsi_host_template; ++ ++struct scsi_transport_template; ++ ++struct Scsi_Host { ++ struct list_head __devices; ++ struct list_head __targets; ++ struct list_head starved_list; ++ spinlock_t default_lock; ++ spinlock_t *host_lock; ++ struct mutex scan_mutex; ++ struct list_head eh_cmd_q; ++ struct task_struct *ehandler; ++ struct completion *eh_action; ++ wait_queue_head_t host_wait; ++ struct scsi_host_template *hostt; ++ struct scsi_transport_template *transportt; ++ union { ++ struct blk_queue_tag *bqt; ++ struct blk_mq_tag_set tag_set; ++ }; ++ atomic_t host_busy; ++ atomic_t host_blocked; ++ unsigned int host_failed; ++ unsigned int host_eh_scheduled; ++ unsigned int host_no; ++ int eh_deadline; ++ long unsigned int last_reset; ++ unsigned int max_channel; ++ unsigned int max_id; ++ u64 max_lun; ++ unsigned int unique_id; ++ short unsigned int max_cmd_len; ++ int this_id; ++ int can_queue; ++ short int cmd_per_lun; ++ short unsigned int sg_tablesize; ++ short unsigned int sg_prot_tablesize; ++ unsigned int max_sectors; ++ long unsigned int dma_boundary; ++ unsigned int nr_hw_queues; ++ long unsigned int cmd_serial_number; ++ unsigned int active_mode: 2; ++ unsigned int unchecked_isa_dma: 1; ++ unsigned int use_clustering: 1; ++ unsigned int host_self_blocked: 1; ++ unsigned int reverse_ordering: 1; ++ unsigned int tmf_in_progress: 1; ++ unsigned int async_scan: 1; ++ unsigned int eh_noresume: 1; ++ unsigned int no_write_same: 1; ++ unsigned int use_blk_mq: 1; ++ unsigned int use_cmd_list: 1; ++ unsigned int short_inquiry: 1; ++ char work_q_name[20]; ++ struct workqueue_struct *work_q; ++ struct workqueue_struct *tmf_work_q; ++ unsigned int no_scsi2_lun_in_cdb: 1; ++ unsigned int max_host_blocked; ++ unsigned int prot_capabilities; ++ unsigned char prot_guard_type; ++ long unsigned int base; ++ long unsigned int io_port; ++ unsigned char n_io_port; ++ unsigned char dma_channel; ++ unsigned int irq; ++ enum scsi_host_state shost_state; ++ struct device shost_gendev; ++ struct device shost_dev; ++ void *shost_data; ++ struct device *dma_dev; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int kabi_reserved5; ++ long unsigned int kabi_reserved6; ++ long unsigned int hostdata[0]; ++}; ++ ++enum scsi_target_state { ++ STARGET_CREATED = 1, ++ STARGET_RUNNING = 2, ++ STARGET_REMOVE = 3, ++ STARGET_CREATED_REMOVE = 4, ++ STARGET_DEL = 5, ++}; ++ ++struct scsi_target { ++ struct scsi_device *starget_sdev_user; ++ struct list_head siblings; ++ struct list_head devices; ++ struct device dev; ++ struct kref reap_ref; ++ unsigned int channel; ++ unsigned int id; ++ unsigned int create: 1; ++ unsigned int single_lun: 1; ++ unsigned int pdt_1f_for_no_lun: 1; ++ unsigned int no_report_luns: 1; ++ unsigned int expecting_lun_change: 1; ++ atomic_t target_busy; ++ atomic_t target_blocked; ++ unsigned int can_queue; ++ unsigned int max_target_blocked; ++ char scsi_level; ++ enum scsi_target_state state; ++ void *hostdata; ++ long unsigned int 
kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++ long unsigned int starget_data[0]; ++}; ++ ++struct scsi_data_buffer { ++ struct sg_table table; ++ unsigned int length; ++ int resid; ++}; ++ ++struct scsi_pointer { ++ char *ptr; ++ int this_residual; ++ struct scatterlist *buffer; ++ int buffers_residual; ++ dma_addr_t dma_handle; ++ volatile int Status; ++ volatile int Message; ++ volatile int have_data_in; ++ volatile int sent_command; ++ volatile int phase; ++}; ++ ++struct scsi_cmnd { ++ struct scsi_request req; ++ struct scsi_device *device; ++ struct list_head list; ++ struct list_head eh_entry; ++ struct delayed_work abort_work; ++ struct callback_head rcu; ++ int eh_eflags; ++ long unsigned int serial_number; ++ long unsigned int jiffies_at_alloc; ++ int retries; ++ int allowed; ++ unsigned char prot_op; ++ unsigned char prot_type; ++ unsigned char prot_flags; ++ short unsigned int cmd_len; ++ enum dma_data_direction sc_data_direction; ++ unsigned char *cmnd; ++ struct scsi_data_buffer sdb; ++ struct scsi_data_buffer *prot_sdb; ++ unsigned int underflow; ++ unsigned int transfersize; ++ struct request *request; ++ unsigned char *sense_buffer; ++ void (*scsi_done)(struct scsi_cmnd *); ++ struct scsi_pointer SCp; ++ unsigned char *host_scribble; ++ int result; ++ int flags; ++ unsigned char tag; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++enum scsi_prot_operations { ++ SCSI_PROT_NORMAL = 0, ++ SCSI_PROT_READ_INSERT = 1, ++ SCSI_PROT_WRITE_STRIP = 2, ++ SCSI_PROT_READ_STRIP = 3, ++ SCSI_PROT_WRITE_INSERT = 4, ++ SCSI_PROT_READ_PASS = 5, ++ SCSI_PROT_WRITE_PASS = 6, ++}; ++ ++struct scsi_driver { ++ struct device_driver gendrv; ++ void (*rescan)(struct device *); ++ int (*init_command)(struct scsi_cmnd *); ++ void (*uninit_command)(struct scsi_cmnd *); ++ int (*done)(struct scsi_cmnd *); ++ int (*eh_action)(struct scsi_cmnd *, int); ++ void (*eh_reset)(struct scsi_cmnd *); ++}; ++ ++struct scsi_host_cmd_pool; ++ ++struct scsi_host_template { ++ struct module *module; ++ const char *name; ++ const char * (*info)(struct Scsi_Host *); ++ int (*ioctl)(struct scsi_device *, unsigned int, void *); ++ int (*compat_ioctl)(struct scsi_device *, unsigned int, void *); ++ int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); ++ int (*eh_abort_handler)(struct scsi_cmnd *); ++ int (*eh_device_reset_handler)(struct scsi_cmnd *); ++ int (*eh_target_reset_handler)(struct scsi_cmnd *); ++ int (*eh_bus_reset_handler)(struct scsi_cmnd *); ++ int (*eh_host_reset_handler)(struct scsi_cmnd *); ++ int (*slave_alloc)(struct scsi_device *); ++ int (*slave_configure)(struct scsi_device *); ++ void (*slave_destroy)(struct scsi_device *); ++ int (*target_alloc)(struct scsi_target *); ++ void (*target_destroy)(struct scsi_target *); ++ int (*scan_finished)(struct Scsi_Host *, long unsigned int); ++ void (*scan_start)(struct Scsi_Host *); ++ int (*change_queue_depth)(struct scsi_device *, int); ++ int (*map_queues)(struct Scsi_Host *); ++ int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); ++ void (*unlock_native_capacity)(struct scsi_device *); ++ int (*show_info)(struct seq_file *, struct Scsi_Host *); ++ int (*write_info)(struct Scsi_Host *, char *, int); ++ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); ++ int (*host_reset)(struct Scsi_Host *, int); ++ const char 
*proc_name; ++ struct proc_dir_entry *proc_dir; ++ int can_queue; ++ int this_id; ++ short unsigned int sg_tablesize; ++ short unsigned int sg_prot_tablesize; ++ unsigned int max_sectors; ++ long unsigned int dma_boundary; ++ short int cmd_per_lun; ++ unsigned char present; ++ int tag_alloc_policy; ++ unsigned int track_queue_depth: 1; ++ unsigned int supported_mode: 2; ++ unsigned int unchecked_isa_dma: 1; ++ unsigned int use_clustering: 1; ++ unsigned int emulated: 1; ++ unsigned int skip_settle_delay: 1; ++ unsigned int no_write_same: 1; ++ unsigned int force_blk_mq: 1; ++ unsigned int max_host_blocked; ++ struct device_attribute **shost_attrs; ++ struct device_attribute **sdev_attrs; ++ const struct attribute_group **sdev_groups; ++ u64 vendor_id; ++ unsigned int cmd_size; ++ struct scsi_host_cmd_pool *cmd_pool; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct trace_event_raw_scsi_dispatch_cmd_start { ++ struct trace_entry ent; ++ unsigned int host_no; ++ unsigned int channel; ++ unsigned int id; ++ unsigned int lun; ++ unsigned int opcode; ++ unsigned int cmd_len; ++ unsigned int data_sglen; ++ unsigned int prot_sglen; ++ unsigned char prot_op; ++ u32 __data_loc_cmnd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_scsi_dispatch_cmd_error { ++ struct trace_entry ent; ++ unsigned int host_no; ++ unsigned int channel; ++ unsigned int id; ++ unsigned int lun; ++ int rtn; ++ unsigned int opcode; ++ unsigned int cmd_len; ++ unsigned int data_sglen; ++ unsigned int prot_sglen; ++ unsigned char prot_op; ++ u32 __data_loc_cmnd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_scsi_cmd_done_timeout_template { ++ struct trace_entry ent; ++ unsigned int host_no; ++ unsigned int channel; ++ unsigned int id; ++ unsigned int lun; ++ int result; ++ unsigned int opcode; ++ unsigned int cmd_len; ++ unsigned int data_sglen; ++ unsigned int prot_sglen; ++ unsigned char prot_op; ++ u32 __data_loc_cmnd; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_scsi_eh_wakeup { ++ struct trace_entry ent; ++ unsigned int host_no; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_scsi_dispatch_cmd_start { ++ u32 cmnd; ++}; ++ ++struct trace_event_data_offsets_scsi_dispatch_cmd_error { ++ u32 cmnd; ++}; ++ ++struct trace_event_data_offsets_scsi_cmd_done_timeout_template { ++ u32 cmnd; ++}; ++ ++struct trace_event_data_offsets_scsi_eh_wakeup {}; ++ ++struct scsi_transport_template { ++ struct transport_container host_attrs; ++ struct transport_container target_attrs; ++ struct transport_container device_attrs; ++ int (*user_scan)(struct Scsi_Host *, uint, uint, u64); ++ int device_size; ++ int device_private_offset; ++ int target_size; ++ int target_private_offset; ++ int host_size; ++ unsigned int create_work_queue: 1; ++ void (*eh_strategy_handler)(struct Scsi_Host *); ++}; ++ ++struct scsi_idlun { ++ __u32 dev_id; ++ __u32 host_unique_id; ++}; ++ ++typedef void (*activate_complete)(void *, int); ++ ++struct scsi_device_handler { ++ struct list_head list; ++ struct module *module; ++ const char *name; ++ int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); ++ int (*attach)(struct scsi_device *); ++ void (*detach)(struct scsi_device *); ++ int (*activate)(struct scsi_device *, activate_complete, void *); ++ int (*prep_fn)(struct scsi_device *, struct request *); ++ int (*set_params)(struct scsi_device *, const char *); ++ void (*rescan)(struct scsi_device *); ++}; ++ 
++struct scsi_eh_save { ++ int result; ++ unsigned int resid_len; ++ int eh_eflags; ++ enum dma_data_direction data_direction; ++ unsigned int underflow; ++ unsigned char cmd_len; ++ unsigned char prot_op; ++ unsigned char *cmnd; ++ struct scsi_data_buffer sdb; ++ struct request *next_rq; ++ unsigned char eh_cmnd[16]; ++ struct scatterlist sense_sgl; ++}; ++ ++struct scsi_varlen_cdb_hdr { ++ __u8 opcode; ++ __u8 control; ++ __u8 misc[5]; ++ __u8 additional_cdb_length; ++ __be16 service_action; ++}; ++ ++struct scsi_mode_data { ++ __u32 length; ++ __u16 block_descriptor_length; ++ __u8 medium_type; ++ __u8 device_specific; ++ __u8 header_length; ++ __u8 longlba: 1; ++}; ++ ++struct scsi_event { ++ enum scsi_device_event evt_type; ++ struct list_head node; ++}; ++ ++enum scsi_host_prot_capabilities { ++ SHOST_DIF_TYPE1_PROTECTION = 1, ++ SHOST_DIF_TYPE2_PROTECTION = 2, ++ SHOST_DIF_TYPE3_PROTECTION = 4, ++ SHOST_DIX_TYPE0_PROTECTION = 8, ++ SHOST_DIX_TYPE1_PROTECTION = 16, ++ SHOST_DIX_TYPE2_PROTECTION = 32, ++ SHOST_DIX_TYPE3_PROTECTION = 64, ++}; ++ ++enum { ++ ACTION_FAIL = 0, ++ ACTION_REPREP = 1, ++ ACTION_RETRY = 2, ++ ACTION_DELAYED_RETRY = 3, ++}; ++ ++struct value_name_pair; ++ ++struct sa_name_list { ++ int opcode; ++ const struct value_name_pair *arr; ++ int arr_sz; ++}; ++ ++struct value_name_pair { ++ int value; ++ const char *name; ++}; ++ ++struct error_info { ++ short unsigned int code12; ++ short unsigned int size; ++}; ++ ++struct error_info2 { ++ unsigned char code1; ++ unsigned char code2_min; ++ unsigned char code2_max; ++ const char *str; ++ const char *fmt; ++}; ++ ++struct scsi_lun { ++ __u8 scsi_lun[8]; ++}; ++ ++enum scsi_timeouts { ++ SCSI_DEFAULT_EH_TIMEOUT = 2500, ++}; ++ ++enum scsi_scan_mode { ++ SCSI_SCAN_INITIAL = 0, ++ SCSI_SCAN_RESCAN = 1, ++ SCSI_SCAN_MANUAL = 2, ++}; ++ ++struct async_scan_data { ++ struct list_head list; ++ struct Scsi_Host *shost; ++ struct completion prev_finished; ++}; ++ ++enum scsi_devinfo_key { ++ SCSI_DEVINFO_GLOBAL = 0, ++ SCSI_DEVINFO_SPI = 1, ++}; ++ ++struct scsi_dev_info_list { ++ struct list_head dev_info_list; ++ char vendor[8]; ++ char model[16]; ++ blist_flags_t flags; ++ unsigned int compatible; ++}; ++ ++struct scsi_dev_info_list_table { ++ struct list_head node; ++ struct list_head scsi_dev_info_list; ++ const char *name; ++ int key; ++}; ++ ++struct double_list { ++ struct list_head *top; ++ struct list_head *bottom; ++}; ++ ++struct scsi_nl_hdr { ++ uint8_t version; ++ uint8_t transport; ++ uint16_t magic; ++ uint16_t msgtype; ++ uint16_t msglen; ++}; ++ ++enum { ++ SCSI_DH_OK = 0, ++ SCSI_DH_DEV_FAILED = 1, ++ SCSI_DH_DEV_TEMP_BUSY = 2, ++ SCSI_DH_DEV_UNSUPP = 3, ++ SCSI_DH_DEVICE_MAX = 4, ++ SCSI_DH_NOTCONN = 5, ++ SCSI_DH_CONN_FAILURE = 6, ++ SCSI_DH_TRANSPORT_MAX = 7, ++ SCSI_DH_IO = 8, ++ SCSI_DH_INVALID_IO = 9, ++ SCSI_DH_RETRY = 10, ++ SCSI_DH_IMM_RETRY = 11, ++ SCSI_DH_TIMED_OUT = 12, ++ SCSI_DH_RES_TEMP_UNAVAIL = 13, ++ SCSI_DH_DEV_OFFLINED = 14, ++ SCSI_DH_NOMEM = 15, ++ SCSI_DH_NOSYS = 16, ++ SCSI_DH_DRIVER_MAX = 17, ++}; ++ ++struct scsi_dh_blist { ++ const char *vendor; ++ const char *model; ++ const char *driver; ++}; ++ ++struct rdac_mode_6_hdr { ++ u8 data_len; ++ u8 medium_type; ++ u8 device_params; ++ u8 block_desc_len; ++}; ++ ++struct rdac_mode_10_hdr { ++ u16 data_len; ++ u8 medium_type; ++ u8 device_params; ++ u16 reserved; ++ u16 block_desc_len; ++}; ++ ++struct rdac_mode_common { ++ u8 controller_serial[16]; ++ u8 alt_controller_serial[16]; ++ u8 rdac_mode[2]; ++ u8 alt_rdac_mode[2]; ++ u8 
quiescence_timeout; ++ u8 rdac_options; ++}; ++ ++struct rdac_pg_legacy { ++ struct rdac_mode_6_hdr hdr; ++ u8 page_code; ++ u8 page_len; ++ struct rdac_mode_common common; ++ u8 lun_table[32]; ++ u8 reserved2[32]; ++ u8 reserved3; ++ u8 reserved4; ++}; ++ ++struct rdac_pg_expanded { ++ struct rdac_mode_10_hdr hdr; ++ u8 page_code; ++ u8 subpage_code; ++ u8 page_len[2]; ++ struct rdac_mode_common common; ++ u8 lun_table[256]; ++ u8 reserved3; ++ u8 reserved4; ++}; ++ ++struct c9_inquiry { ++ u8 peripheral_info; ++ u8 page_code; ++ u8 reserved1; ++ u8 page_len; ++ u8 page_id[4]; ++ u8 avte_cvp; ++ u8 path_prio; ++ u8 reserved2[38]; ++}; ++ ++struct c4_inquiry { ++ u8 peripheral_info; ++ u8 page_code; ++ u8 reserved1; ++ u8 page_len; ++ u8 page_id[4]; ++ u8 subsys_id[16]; ++ u8 revision[4]; ++ u8 slot_id[2]; ++ u8 reserved[2]; ++}; ++ ++struct c8_inquiry { ++ u8 peripheral_info; ++ u8 page_code; ++ u8 reserved1; ++ u8 page_len; ++ u8 page_id[4]; ++ u8 reserved2[3]; ++ u8 vol_uniq_id_len; ++ u8 vol_uniq_id[16]; ++ u8 vol_user_label_len; ++ u8 vol_user_label[60]; ++ u8 array_uniq_id_len; ++ u8 array_unique_id[16]; ++ u8 array_user_label_len; ++ u8 array_user_label[60]; ++ u8 lun[8]; ++}; ++ ++struct rdac_controller { ++ u8 array_id[16]; ++ int use_ms10; ++ struct kref kref; ++ struct list_head node; ++ union { ++ struct rdac_pg_legacy legacy; ++ struct rdac_pg_expanded expanded; ++ } mode_select; ++ u8 index; ++ u8 array_name[31]; ++ struct Scsi_Host *host; ++ spinlock_t ms_lock; ++ int ms_queued; ++ struct work_struct ms_work; ++ struct scsi_device *ms_sdev; ++ struct list_head ms_head; ++ struct list_head dh_list; ++}; ++ ++struct c2_inquiry { ++ u8 peripheral_info; ++ u8 page_code; ++ u8 reserved1; ++ u8 page_len; ++ u8 page_id[4]; ++ u8 sw_version[3]; ++ u8 sw_date[3]; ++ u8 features_enabled; ++ u8 max_lun_supported; ++ u8 partitions[239]; ++}; ++ ++struct rdac_dh_data { ++ struct list_head node; ++ struct rdac_controller *ctlr; ++ struct scsi_device *sdev; ++ unsigned int lun; ++ unsigned char mode; ++ unsigned char state; ++ char lun_state; ++ char preferred; ++ union { ++ struct c2_inquiry c2; ++ struct c4_inquiry c4; ++ struct c8_inquiry c8; ++ struct c9_inquiry c9; ++ } inq; ++}; ++ ++struct rdac_queue_data { ++ struct list_head entry; ++ struct rdac_dh_data *h; ++ activate_complete callback_fn; ++ void *callback_data; ++}; ++ ++struct hp_sw_dh_data { ++ int path_state; ++ int retries; ++ int retry_cnt; ++ struct scsi_device *sdev; ++}; ++ ++struct clariion_dh_data { ++ unsigned int flags; ++ unsigned char buffer[252]; ++ int lun_state; ++ int port; ++ int default_sp; ++ int current_sp; ++}; ++ ++struct alua_port_group { ++ struct kref kref; ++ struct callback_head rcu; ++ struct list_head node; ++ struct list_head dh_list; ++ unsigned char device_id_str[256]; ++ int device_id_len; ++ int group_id; ++ int tpgs; ++ int state; ++ int pref; ++ int valid_states; ++ unsigned int flags; ++ unsigned char transition_tmo; ++ long unsigned int expiry; ++ long unsigned int interval; ++ struct delayed_work rtpg_work; ++ spinlock_t lock; ++ struct list_head rtpg_list; ++ struct scsi_device *rtpg_sdev; ++}; ++ ++struct alua_dh_data { ++ struct list_head node; ++ struct alua_port_group *pg; ++ int group_id; ++ spinlock_t pg_lock; ++ struct scsi_device *sdev; ++ int init_error; ++ struct mutex init_mutex; ++}; ++ ++struct alua_queue_data { ++ struct list_head entry; ++ activate_complete callback_fn; ++ void *callback_data; ++}; ++ ++struct spi_device_id { ++ char name[32]; ++ kernel_ulong_t 
driver_data; ++}; ++ ++struct of_reconfig_data { ++ struct device_node *dn; ++ struct property *prop; ++ struct property *old_prop; ++}; ++ ++enum of_reconfig_change { ++ OF_RECONFIG_NO_CHANGE = 0, ++ OF_RECONFIG_CHANGE_ADD = 1, ++ OF_RECONFIG_CHANGE_REMOVE = 2, ++}; ++ ++struct spi_driver { ++ const struct spi_device_id *id_table; ++ int (*probe)(struct spi_device *); ++ int (*remove)(struct spi_device *); ++ void (*shutdown)(struct spi_device *); ++ struct device_driver driver; ++}; ++ ++typedef void (*spi_res_release_t)(struct spi_controller *, struct spi_message *, void *); ++ ++struct spi_res { ++ struct list_head entry; ++ spi_res_release_t release; ++ long long unsigned int data[0]; ++}; ++ ++struct spi_replaced_transfers; ++ ++typedef void (*spi_replaced_release_t)(struct spi_controller *, struct spi_message *, struct spi_replaced_transfers *); ++ ++struct spi_replaced_transfers { ++ spi_replaced_release_t release; ++ void *extradata; ++ struct list_head replaced_transfers; ++ struct list_head *replaced_after; ++ size_t inserted; ++ struct spi_transfer inserted_transfers[0]; ++}; ++ ++struct spi_board_info { ++ char modalias[32]; ++ const void *platform_data; ++ const struct property_entry *properties; ++ void *controller_data; ++ int irq; ++ u32 max_speed_hz; ++ u16 bus_num; ++ u16 chip_select; ++ u16 mode; ++}; ++ ++enum spi_mem_data_dir { ++ SPI_MEM_DATA_IN = 0, ++ SPI_MEM_DATA_OUT = 1, ++}; ++ ++struct spi_mem_op { ++ struct { ++ u8 buswidth; ++ u8 opcode; ++ } cmd; ++ struct { ++ u8 nbytes; ++ u8 buswidth; ++ u64 val; ++ } addr; ++ struct { ++ u8 nbytes; ++ u8 buswidth; ++ } dummy; ++ struct { ++ u8 buswidth; ++ enum spi_mem_data_dir dir; ++ unsigned int nbytes; ++ union { ++ void *in; ++ const void *out; ++ } buf; ++ } data; ++}; ++ ++struct spi_mem { ++ struct spi_device *spi; ++ void *drvpriv; ++ const char *name; ++}; ++ ++struct trace_event_raw_spi_controller { ++ struct trace_entry ent; ++ int bus_num; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_spi_message { ++ struct trace_entry ent; ++ int bus_num; ++ int chip_select; ++ struct spi_message *msg; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_spi_message_done { ++ struct trace_entry ent; ++ int bus_num; ++ int chip_select; ++ struct spi_message *msg; ++ unsigned int frame; ++ unsigned int actual; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_spi_transfer { ++ struct trace_entry ent; ++ int bus_num; ++ int chip_select; ++ struct spi_transfer *xfer; ++ int len; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_spi_controller {}; ++ ++struct trace_event_data_offsets_spi_message {}; ++ ++struct trace_event_data_offsets_spi_message_done {}; ++ ++struct trace_event_data_offsets_spi_transfer {}; ++ ++struct boardinfo { ++ struct list_head list; ++ struct spi_board_info board_info; ++}; ++ ++enum dw_ssi_type { ++ SSI_MOTO_SPI = 0, ++ SSI_TI_SSP = 1, ++ SSI_NS_MICROWIRE = 2, ++}; ++ ++struct dw_spi; ++ ++struct dw_spi_dma_ops { ++ int (*dma_init)(struct dw_spi *); ++ void (*dma_exit)(struct dw_spi *); ++ int (*dma_setup)(struct dw_spi *, struct spi_transfer *); ++ bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *); ++ int (*dma_transfer)(struct dw_spi *, struct spi_transfer *); ++ void (*dma_stop)(struct dw_spi *); ++}; ++ ++struct dw_spi { ++ struct spi_controller *master; ++ enum dw_ssi_type type; ++ void *regs; ++ long unsigned int paddr; ++ int irq; ++ u32 fifo_len; ++ u32 max_freq; ++ u32 reg_io_width; ++ u16 bus_num; ++ u16 num_cs; ++ void (*set_cs)(struct 
spi_device *, bool); ++ size_t len; ++ void *tx; ++ void *tx_end; ++ spinlock_t buf_lock; ++ void *rx; ++ void *rx_end; ++ int dma_mapped; ++ u8 n_bytes; ++ u32 dma_width; ++ irqreturn_t (*transfer_handler)(struct dw_spi *); ++ u32 current_freq; ++ int dma_inited; ++ struct dma_chan *txchan; ++ struct dma_chan *rxchan; ++ long unsigned int dma_chan_busy; ++ dma_addr_t dma_addr; ++ const struct dw_spi_dma_ops *dma_ops; ++ void *dma_tx; ++ void *dma_rx; ++ void *priv; ++ struct dentry *debugfs; ++}; ++ ++struct dw_spi_chip { ++ u8 poll_mode; ++ u8 type; ++ void (*cs_control)(u32); ++}; ++ ++struct chip_data { ++ u8 tmode; ++ u8 type; ++ u8 poll_mode; ++ u16 clk_div; ++ u32 speed_hz; ++ void (*cs_control)(u32); ++}; ++ ++enum ssp_loopback { ++ LOOPBACK_DISABLED = 0, ++ LOOPBACK_ENABLED = 1, ++}; ++ ++enum ssp_interface { ++ SSP_INTERFACE_MOTOROLA_SPI = 0, ++ SSP_INTERFACE_TI_SYNC_SERIAL = 1, ++ SSP_INTERFACE_NATIONAL_MICROWIRE = 2, ++ SSP_INTERFACE_UNIDIRECTIONAL = 3, ++}; ++ ++enum ssp_hierarchy { ++ SSP_MASTER = 0, ++ SSP_SLAVE = 1, ++}; ++ ++struct ssp_clock_params { ++ u8 cpsdvsr; ++ u8 scr; ++}; ++ ++enum ssp_rx_endian { ++ SSP_RX_MSB = 0, ++ SSP_RX_LSB = 1, ++}; ++ ++enum ssp_tx_endian { ++ SSP_TX_MSB = 0, ++ SSP_TX_LSB = 1, ++}; ++ ++enum ssp_data_size { ++ SSP_DATA_BITS_4 = 3, ++ SSP_DATA_BITS_5 = 4, ++ SSP_DATA_BITS_6 = 5, ++ SSP_DATA_BITS_7 = 6, ++ SSP_DATA_BITS_8 = 7, ++ SSP_DATA_BITS_9 = 8, ++ SSP_DATA_BITS_10 = 9, ++ SSP_DATA_BITS_11 = 10, ++ SSP_DATA_BITS_12 = 11, ++ SSP_DATA_BITS_13 = 12, ++ SSP_DATA_BITS_14 = 13, ++ SSP_DATA_BITS_15 = 14, ++ SSP_DATA_BITS_16 = 15, ++ SSP_DATA_BITS_17 = 16, ++ SSP_DATA_BITS_18 = 17, ++ SSP_DATA_BITS_19 = 18, ++ SSP_DATA_BITS_20 = 19, ++ SSP_DATA_BITS_21 = 20, ++ SSP_DATA_BITS_22 = 21, ++ SSP_DATA_BITS_23 = 22, ++ SSP_DATA_BITS_24 = 23, ++ SSP_DATA_BITS_25 = 24, ++ SSP_DATA_BITS_26 = 25, ++ SSP_DATA_BITS_27 = 26, ++ SSP_DATA_BITS_28 = 27, ++ SSP_DATA_BITS_29 = 28, ++ SSP_DATA_BITS_30 = 29, ++ SSP_DATA_BITS_31 = 30, ++ SSP_DATA_BITS_32 = 31, ++}; ++ ++enum ssp_mode { ++ INTERRUPT_TRANSFER = 0, ++ POLLING_TRANSFER = 1, ++ DMA_TRANSFER = 2, ++}; ++ ++enum ssp_rx_level_trig { ++ SSP_RX_1_OR_MORE_ELEM = 0, ++ SSP_RX_4_OR_MORE_ELEM = 1, ++ SSP_RX_8_OR_MORE_ELEM = 2, ++ SSP_RX_16_OR_MORE_ELEM = 3, ++ SSP_RX_32_OR_MORE_ELEM = 4, ++}; ++ ++enum ssp_tx_level_trig { ++ SSP_TX_1_OR_MORE_EMPTY_LOC = 0, ++ SSP_TX_4_OR_MORE_EMPTY_LOC = 1, ++ SSP_TX_8_OR_MORE_EMPTY_LOC = 2, ++ SSP_TX_16_OR_MORE_EMPTY_LOC = 3, ++ SSP_TX_32_OR_MORE_EMPTY_LOC = 4, ++}; ++ ++enum ssp_spi_clk_phase { ++ SSP_CLK_FIRST_EDGE = 0, ++ SSP_CLK_SECOND_EDGE = 1, ++}; ++ ++enum ssp_spi_clk_pol { ++ SSP_CLK_POL_IDLE_LOW = 0, ++ SSP_CLK_POL_IDLE_HIGH = 1, ++}; ++ ++enum ssp_microwire_ctrl_len { ++ SSP_BITS_4 = 3, ++ SSP_BITS_5 = 4, ++ SSP_BITS_6 = 5, ++ SSP_BITS_7 = 6, ++ SSP_BITS_8 = 7, ++ SSP_BITS_9 = 8, ++ SSP_BITS_10 = 9, ++ SSP_BITS_11 = 10, ++ SSP_BITS_12 = 11, ++ SSP_BITS_13 = 12, ++ SSP_BITS_14 = 13, ++ SSP_BITS_15 = 14, ++ SSP_BITS_16 = 15, ++ SSP_BITS_17 = 16, ++ SSP_BITS_18 = 17, ++ SSP_BITS_19 = 18, ++ SSP_BITS_20 = 19, ++ SSP_BITS_21 = 20, ++ SSP_BITS_22 = 21, ++ SSP_BITS_23 = 22, ++ SSP_BITS_24 = 23, ++ SSP_BITS_25 = 24, ++ SSP_BITS_26 = 25, ++ SSP_BITS_27 = 26, ++ SSP_BITS_28 = 27, ++ SSP_BITS_29 = 28, ++ SSP_BITS_30 = 29, ++ SSP_BITS_31 = 30, ++ SSP_BITS_32 = 31, ++}; ++ ++enum ssp_microwire_wait_state { ++ SSP_MWIRE_WAIT_ZERO = 0, ++ SSP_MWIRE_WAIT_ONE = 1, ++}; ++ ++enum ssp_duplex { ++ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX = 0, ++ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX = 1, ++}; ++ 
++enum ssp_clkdelay { ++ SSP_FEEDBACK_CLK_DELAY_NONE = 0, ++ SSP_FEEDBACK_CLK_DELAY_1T = 1, ++ SSP_FEEDBACK_CLK_DELAY_2T = 2, ++ SSP_FEEDBACK_CLK_DELAY_3T = 3, ++ SSP_FEEDBACK_CLK_DELAY_4T = 4, ++ SSP_FEEDBACK_CLK_DELAY_5T = 5, ++ SSP_FEEDBACK_CLK_DELAY_6T = 6, ++ SSP_FEEDBACK_CLK_DELAY_7T = 7, ++}; ++ ++enum ssp_chip_select { ++ SSP_CHIP_SELECT = 0, ++ SSP_CHIP_DESELECT = 1, ++}; ++ ++struct pl022_ssp_controller { ++ u16 bus_id; ++ u8 num_chipselect; ++ u8 enable_dma: 1; ++ bool (*dma_filter)(struct dma_chan *, void *); ++ void *dma_rx_param; ++ void *dma_tx_param; ++ int autosuspend_delay; ++ bool rt; ++ int *chipselects; ++}; ++ ++struct pl022_config_chip { ++ enum ssp_interface iface; ++ enum ssp_hierarchy hierarchy; ++ bool slave_tx_disable; ++ struct ssp_clock_params clk_freq; ++ enum ssp_mode com_mode; ++ enum ssp_rx_level_trig rx_lev_trig; ++ enum ssp_tx_level_trig tx_lev_trig; ++ enum ssp_microwire_ctrl_len ctrl_len; ++ enum ssp_microwire_wait_state wait_state; ++ enum ssp_duplex duplex; ++ enum ssp_clkdelay clkdelay; ++ void (*cs_control)(u32); ++}; ++ ++enum ssp_reading { ++ READING_NULL = 0, ++ READING_U8 = 1, ++ READING_U16 = 2, ++ READING_U32 = 3, ++}; ++ ++enum ssp_writing { ++ WRITING_NULL = 0, ++ WRITING_U8 = 1, ++ WRITING_U16 = 2, ++ WRITING_U32 = 3, ++}; ++ ++struct vendor_data___2 { ++ int fifodepth; ++ int max_bpw; ++ bool unidir; ++ bool extended_cr; ++ bool pl023; ++ bool loopback; ++ bool internal_cs_ctrl; ++}; ++ ++struct chip_data___2; ++ ++struct pl022 { ++ struct amba_device *adev; ++ struct vendor_data___2 *vendor; ++ resource_size_t phybase; ++ void *virtbase; ++ struct clk *clk; ++ struct spi_controller *master; ++ struct pl022_ssp_controller *master_info; ++ struct tasklet_struct pump_transfers; ++ struct spi_message *cur_msg; ++ struct spi_transfer *cur_transfer; ++ struct chip_data___2 *cur_chip; ++ bool next_msg_cs_active; ++ void *tx; ++ void *tx_end; ++ void *rx; ++ void *rx_end; ++ enum ssp_reading read; ++ enum ssp_writing write; ++ u32 exp_fifo_level; ++ enum ssp_rx_level_trig rx_lev_trig; ++ enum ssp_tx_level_trig tx_lev_trig; ++ struct dma_chan *dma_rx_channel; ++ struct dma_chan *dma_tx_channel; ++ struct sg_table sgt_rx; ++ struct sg_table sgt_tx; ++ char *dummypage; ++ bool dma_running; ++ int cur_cs; ++ int *chipselects; ++}; ++ ++struct chip_data___2 { ++ u32 cr0; ++ u16 cr1; ++ u16 dmacr; ++ u16 cpsr; ++ u8 n_bytes; ++ bool enable_dma; ++ enum ssp_reading read; ++ enum ssp_writing write; ++ void (*cs_control)(u32); ++ int xfer_type; ++}; ++ ++struct spi_qup { ++ void *base; ++ struct device *dev; ++ struct clk *cclk; ++ struct clk *iclk; ++ int irq; ++ spinlock_t lock; ++ int in_fifo_sz; ++ int out_fifo_sz; ++ int in_blk_sz; ++ int out_blk_sz; ++ struct spi_transfer *xfer; ++ struct completion done; ++ int error; ++ int w_size; ++ int n_words; ++ int tx_bytes; ++ int rx_bytes; ++ const u8 *tx_buf; ++ u8 *rx_buf; ++ int qup_v1; ++ int mode; ++ struct dma_slave_config rx_conf; ++ struct dma_slave_config tx_conf; ++}; ++ ++struct devprobe2 { ++ struct net_device * (*probe)(int); ++ int status; ++}; ++ ++enum { ++ SKBTX_HW_TSTAMP = 1, ++ SKBTX_SW_TSTAMP = 2, ++ SKBTX_IN_PROGRESS = 4, ++ SKBTX_DEV_ZEROCOPY = 8, ++ SKBTX_WIFI_STATUS = 16, ++ SKBTX_SHARED_FRAG = 32, ++ SKBTX_SCHED_TSTAMP = 64, ++}; ++ ++enum netdev_priv_flags { ++ IFF_802_1Q_VLAN = 1, ++ IFF_EBRIDGE = 2, ++ IFF_BONDING = 4, ++ IFF_ISATAP = 8, ++ IFF_WAN_HDLC = 16, ++ IFF_XMIT_DST_RELEASE = 32, ++ IFF_DONT_BRIDGE = 64, ++ IFF_DISABLE_NETPOLL = 128, ++ IFF_MACVLAN_PORT = 256, ++ 
IFF_BRIDGE_PORT = 512, ++ IFF_OVS_DATAPATH = 1024, ++ IFF_TX_SKB_SHARING = 2048, ++ IFF_UNICAST_FLT = 4096, ++ IFF_TEAM_PORT = 8192, ++ IFF_SUPP_NOFCS = 16384, ++ IFF_LIVE_ADDR_CHANGE = 32768, ++ IFF_MACVLAN = 65536, ++ IFF_XMIT_DST_RELEASE_PERM = 131072, ++ IFF_L3MDEV_MASTER = 262144, ++ IFF_NO_QUEUE = 524288, ++ IFF_OPENVSWITCH = 1048576, ++ IFF_L3MDEV_SLAVE = 2097152, ++ IFF_TEAM = 4194304, ++ IFF_RXFH_CONFIGURED = 8388608, ++ IFF_PHONY_HEADROOM = 16777216, ++ IFF_MACSEC = 33554432, ++ IFF_NO_RX_HANDLER = 67108864, ++ IFF_FAILOVER = 134217728, ++ IFF_FAILOVER_SLAVE = 268435456, ++ IFF_L3MDEV_RX_HANDLER = 536870912, ++ IFF_LIVE_RENAME_OK = 1073741824, ++}; ++ ++struct pcpu_lstats { ++ u64 packets; ++ u64 bytes; ++ struct u64_stats_sync syncp; ++}; ++ ++enum { ++ SOF_TIMESTAMPING_TX_HARDWARE = 1, ++ SOF_TIMESTAMPING_TX_SOFTWARE = 2, ++ SOF_TIMESTAMPING_RX_HARDWARE = 4, ++ SOF_TIMESTAMPING_RX_SOFTWARE = 8, ++ SOF_TIMESTAMPING_SOFTWARE = 16, ++ SOF_TIMESTAMPING_SYS_HARDWARE = 32, ++ SOF_TIMESTAMPING_RAW_HARDWARE = 64, ++ SOF_TIMESTAMPING_OPT_ID = 128, ++ SOF_TIMESTAMPING_TX_SCHED = 256, ++ SOF_TIMESTAMPING_TX_ACK = 512, ++ SOF_TIMESTAMPING_OPT_CMSG = 1024, ++ SOF_TIMESTAMPING_OPT_TSONLY = 2048, ++ SOF_TIMESTAMPING_OPT_STATS = 4096, ++ SOF_TIMESTAMPING_OPT_PKTINFO = 8192, ++ SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, ++ SOF_TIMESTAMPING_LAST = 16384, ++ SOF_TIMESTAMPING_MASK = 32767, ++}; ++ ++enum ethtool_link_mode_bit_indices { ++ ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, ++ ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, ++ ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, ++ ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, ++ ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, ++ ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, ++ ETHTOOL_LINK_MODE_Autoneg_BIT = 6, ++ ETHTOOL_LINK_MODE_TP_BIT = 7, ++ ETHTOOL_LINK_MODE_AUI_BIT = 8, ++ ETHTOOL_LINK_MODE_MII_BIT = 9, ++ ETHTOOL_LINK_MODE_FIBRE_BIT = 10, ++ ETHTOOL_LINK_MODE_BNC_BIT = 11, ++ ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, ++ ETHTOOL_LINK_MODE_Pause_BIT = 13, ++ ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, ++ ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, ++ ETHTOOL_LINK_MODE_Backplane_BIT = 16, ++ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, ++ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, ++ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, ++ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, ++ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, ++ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, ++ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, ++ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, ++ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, ++ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, ++ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, ++ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, ++ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, ++ ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, ++ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, ++ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, ++ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, ++ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, ++ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, ++ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, ++ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, ++ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, ++ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, ++ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, ++ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, ++ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, ++ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, ++ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, ++ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, ++ 
ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, ++ ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, ++ ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, ++ ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, ++ ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, ++ ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, ++ __ETHTOOL_LINK_MODE_LAST = 51, ++}; ++ ++struct mdio_board_info { ++ const char *bus_id; ++ char modalias[32]; ++ int mdio_addr; ++ const void *platform_data; ++}; ++ ++struct mdio_board_entry { ++ struct list_head list; ++ struct mdio_board_info board_info; ++}; ++ ++struct mii_ioctl_data { ++ __u16 phy_id; ++ __u16 reg_num; ++ __u16 val_in; ++ __u16 val_out; ++}; ++ ++struct phy_setting { ++ u32 speed; ++ u8 duplex; ++ u8 bit; ++}; ++ ++struct phy_fixup { ++ struct list_head list; ++ char bus_id[64]; ++ u32 phy_uid; ++ u32 phy_uid_mask; ++ int (*run)(struct phy_device *); ++}; ++ ++struct trace_event_raw_mdio_access { ++ struct trace_entry ent; ++ char busid[61]; ++ char read; ++ u8 addr; ++ u16 val; ++ unsigned int regnum; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mdio_access {}; ++ ++struct mdio_driver { ++ struct mdio_driver_common mdiodrv; ++ int (*probe)(struct mdio_device *); ++ void (*remove)(struct mdio_device *); ++}; ++ ++struct fixed_phy_status { ++ int link; ++ int speed; ++ int duplex; ++ int pause; ++ int asym_pause; ++}; ++ ++struct swmii_regs { ++ u16 bmcr; ++ u16 bmsr; ++ u16 lpa; ++ u16 lpagb; ++}; ++ ++enum { ++ SWMII_SPEED_10 = 0, ++ SWMII_SPEED_100 = 1, ++ SWMII_SPEED_1000 = 2, ++ SWMII_DUPLEX_HALF = 0, ++ SWMII_DUPLEX_FULL = 1, ++}; ++ ++enum xgene_enet_cmd { ++ XGENE_ENET_WR_CMD = 2147483648, ++ XGENE_ENET_RD_CMD = 1073741824, ++}; ++ ++enum { ++ MIIM_CMD_IDLE = 0, ++ MIIM_CMD_LEGACY_WRITE = 1, ++ MIIM_CMD_LEGACY_READ = 2, ++}; ++ ++enum xgene_mdio_id { ++ XGENE_MDIO_RGMII = 1, ++ XGENE_MDIO_XFI = 2, ++}; ++ ++struct xgene_mdio_pdata { ++ struct clk *clk; ++ struct device *dev; ++ void *mac_csr_addr; ++ void *diag_csr_addr; ++ void *mdio_csr_addr; ++ struct mii_bus *mdio_bus; ++ int mdio_id; ++ spinlock_t mac_lock; ++}; ++ ++struct fixed_mdio_bus { ++ struct mii_bus *mii_bus; ++ struct list_head phys; ++}; ++ ++struct fixed_phy { ++ int addr; ++ struct phy_device *phydev; ++ seqcount_t seqcount; ++ struct fixed_phy_status status; ++ int (*link_update)(struct net_device *, struct fixed_phy_status *); ++ struct list_head node; ++ int link_gpio; ++}; ++ ++enum xgene_enet_rm { ++ RM0 = 0, ++ RM1 = 1, ++ RM3 = 3, ++}; ++ ++struct xgene_enet_raw_desc { ++ __le64 m0; ++ __le64 m1; ++ __le64 m2; ++ __le64 m3; ++}; ++ ++struct xgene_enet_raw_desc16 { ++ __le64 m0; ++ __le64 m1; ++}; ++ ++enum xgene_enet_ring_cfgsize { ++ RING_CFGSIZE_512B = 0, ++ RING_CFGSIZE_2KB = 1, ++ RING_CFGSIZE_16KB = 2, ++ RING_CFGSIZE_64KB = 3, ++ RING_CFGSIZE_512KB = 4, ++ RING_CFGSIZE_INVALID = 5, ++}; ++ ++enum xgene_enet_ring_type { ++ RING_DISABLED = 0, ++ RING_REGULAR = 1, ++ RING_BUFPOOL = 2, ++}; ++ ++enum xgene_ring_owner { ++ RING_OWNER_ETH0 = 0, ++ RING_OWNER_ETH1 = 1, ++ RING_OWNER_CPU = 15, ++ RING_OWNER_INVALID = 16, ++}; ++ ++enum xgene_enet_ring_bufnum { ++ RING_BUFNUM_REGULAR = 0, ++ RING_BUFNUM_BUFPOOL = 32, ++ RING_BUFNUM_INVALID = 33, ++}; ++ ++enum xgene_enet_err_code { ++ HBF_READ_DATA = 3, ++ HBF_LL_READ = 4, ++ BAD_WORK_MSG = 6, ++ BUFPOOL_TIMEOUT = 15, ++ INGRESS_CRC = 16, ++ INGRESS_CHECKSUM = 17, ++ INGRESS_TRUNC_FRAME = 18, ++ INGRESS_PKT_LEN = 19, ++ INGRESS_PKT_UNDER = 20, ++ INGRESS_FIFO_OVERRUN = 21, ++ INGRESS_CHECKSUM_COMPUTE = 26, ++ ERR_CODE_INVALID = 27, ++}; ++ ++struct xgene_enet_pdata; 
++ ++struct xgene_mac_ops { ++ void (*init)(struct xgene_enet_pdata *); ++ void (*reset)(struct xgene_enet_pdata *); ++ void (*tx_enable)(struct xgene_enet_pdata *); ++ void (*rx_enable)(struct xgene_enet_pdata *); ++ void (*tx_disable)(struct xgene_enet_pdata *); ++ void (*rx_disable)(struct xgene_enet_pdata *); ++ void (*get_drop_cnt)(struct xgene_enet_pdata *, u32 *, u32 *); ++ void (*set_speed)(struct xgene_enet_pdata *); ++ void (*set_mac_addr)(struct xgene_enet_pdata *); ++ void (*set_framesize)(struct xgene_enet_pdata *, int); ++ void (*set_mss)(struct xgene_enet_pdata *, u16, u8); ++ void (*link_state)(struct work_struct *); ++ void (*enable_tx_pause)(struct xgene_enet_pdata *, bool); ++ void (*flowctl_rx)(struct xgene_enet_pdata *, bool); ++ void (*flowctl_tx)(struct xgene_enet_pdata *, bool); ++}; ++ ++struct xgene_enet_desc_ring; ++ ++struct xgene_port_ops { ++ int (*reset)(struct xgene_enet_pdata *); ++ void (*clear)(struct xgene_enet_pdata *, struct xgene_enet_desc_ring *); ++ void (*cle_bypass)(struct xgene_enet_pdata *, u32, u16, u16); ++ void (*shutdown)(struct xgene_enet_pdata *); ++}; ++ ++struct xgene_ring_ops { ++ u8 num_ring_config; ++ u8 num_ring_id_shift; ++ struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *); ++ void (*clear)(struct xgene_enet_desc_ring *); ++ void (*wr_cmd)(struct xgene_enet_desc_ring *, int); ++ u32 (*len)(struct xgene_enet_desc_ring *); ++ void (*coalesce)(struct xgene_enet_desc_ring *); ++}; ++ ++enum xgene_cle_parser { ++ PARSER0 = 0, ++ PARSER1 = 1, ++ PARSER2 = 2, ++ PARSER_ALL = 3, ++}; ++ ++struct xgene_cle_ptree_key { ++ u8 priority; ++ u16 result_pointer; ++}; ++ ++struct xgene_cle_ptree_kn { ++ u8 node_type; ++ u8 num_keys; ++ struct xgene_cle_ptree_key key[32]; ++}; ++ ++struct xgene_cle_dbptr { ++ u8 split_boundary; ++ u8 mirror_nxtfpsel; ++ u8 mirror_fpsel; ++ u16 mirror_dstqid; ++ u8 drop; ++ u8 mirror; ++ u8 hdr_data_split; ++ u64 hopinfomsbs; ++ u8 DR; ++ u8 HR; ++ u64 hopinfomlsbs; ++ u16 h0enq_num; ++ u8 h0fpsel; ++ u8 nxtfpsel; ++ u8 fpsel; ++ u16 dstqid; ++ u8 cle_priority; ++ u8 cle_flowgroup; ++ u8 cle_perflow; ++ u8 cle_insert_timestamp; ++ u8 stash; ++ u8 in; ++ u8 perprioen; ++ u8 perflowgroupen; ++ u8 perflowen; ++ u8 selhash; ++ u8 selhdrext; ++ u8 mirror_nxtfpsel_msb; ++ u8 mirror_fpsel_msb; ++ u8 hfpsel_msb; ++ u8 nxtfpsel_msb; ++ u8 fpsel_msb; ++}; ++ ++struct xgene_cle_ptree { ++ struct xgene_cle_ptree_kn *kn; ++ struct xgene_cle_dbptr *dbptr; ++ u32 num_kn; ++ u32 num_dbptr; ++ u32 start_node; ++ u32 start_pkt; ++ u32 start_dbptr; ++}; ++ ++struct xgene_enet_cle { ++ void *base; ++ struct xgene_cle_ptree ptree; ++ enum xgene_cle_parser active_parser; ++ u32 parsers; ++ u32 max_nodes; ++ u32 max_dbptrs; ++ u32 jump_bytes; ++}; ++ ++struct xgene_cle_ops { ++ int (*cle_init)(struct xgene_enet_pdata *); ++}; ++ ++enum xgene_enet_id { ++ XGENE_ENET1 = 1, ++ XGENE_ENET2 = 2, ++}; ++ ++struct xgene_enet_desc_ring { ++ struct net_device *ndev; ++ u16 id; ++ u16 num; ++ u16 head; ++ u16 tail; ++ u16 exp_buf_tail; ++ u16 slots; ++ u16 irq; ++ char irq_name[16]; ++ u32 size; ++ u32 state[6]; ++ void *cmd_base; ++ void *cmd; ++ dma_addr_t dma; ++ dma_addr_t irq_mbox_dma; ++ void *irq_mbox_addr; ++ u16 dst_ring_num; ++ u16 nbufpool; ++ int npagepool; ++ u8 index; ++ u32 flags; ++ struct sk_buff **rx_skb; ++ struct sk_buff **cp_skb; ++ dma_addr_t *frag_dma_addr; ++ struct page **frag_page; ++ enum xgene_enet_ring_cfgsize cfgsize; ++ struct xgene_enet_desc_ring *cp_ring; ++ struct xgene_enet_desc_ring *buf_pool; 
++ struct xgene_enet_desc_ring *page_pool; ++ struct napi_struct napi; ++ union { ++ void *desc_addr; ++ struct xgene_enet_raw_desc *raw_desc; ++ struct xgene_enet_raw_desc16 *raw_desc16; ++ }; ++ __le64 *exp_bufs; ++ u64 tx_packets; ++ u64 tx_bytes; ++ u64 tx_dropped; ++ u64 tx_errors; ++ u64 rx_packets; ++ u64 rx_bytes; ++ u64 rx_dropped; ++ u64 rx_errors; ++ u64 rx_length_errors; ++ u64 rx_crc_errors; ++ u64 rx_frame_errors; ++ u64 rx_fifo_errors; ++}; ++ ++struct xgene_enet_pdata { ++ struct net_device *ndev; ++ struct mii_bus *mdio_bus; ++ int phy_speed; ++ struct clk *clk; ++ struct platform_device *pdev; ++ enum xgene_enet_id enet_id; ++ struct xgene_enet_desc_ring *tx_ring[8]; ++ struct xgene_enet_desc_ring *rx_ring[8]; ++ u16 tx_level[8]; ++ u16 txc_level[8]; ++ char *dev_name; ++ u32 rx_buff_cnt; ++ u32 tx_qcnt_hi; ++ u32 irqs[16]; ++ u8 rxq_cnt; ++ u8 txq_cnt; ++ u8 cq_cnt; ++ void *eth_csr_addr; ++ void *eth_ring_if_addr; ++ void *eth_diag_csr_addr; ++ void *mcx_mac_addr; ++ void *mcx_mac_csr_addr; ++ void *mcx_stats_addr; ++ void *base_addr; ++ void *pcs_addr; ++ void *ring_csr_addr; ++ void *ring_cmd_addr; ++ int phy_mode; ++ enum xgene_enet_rm rm; ++ struct xgene_enet_cle cle; ++ u64 *extd_stats; ++ u64 false_rflr; ++ u64 vlan_rjbr; ++ spinlock_t stats_lock; ++ const struct xgene_mac_ops *mac_ops; ++ spinlock_t mac_lock; ++ const struct xgene_port_ops *port_ops; ++ struct xgene_ring_ops *ring_ops; ++ const struct xgene_cle_ops *cle_ops; ++ struct delayed_work link_work; ++ u32 port_id; ++ u8 cpu_bufnum; ++ u8 eth_bufnum; ++ u8 bp_bufnum; ++ u16 ring_num; ++ u32 mss[4]; ++ u32 mss_refcnt[4]; ++ spinlock_t mss_lock; ++ u8 tx_delay; ++ u8 rx_delay; ++ bool mdio_driver; ++ struct gpio_desc___2 *sfp_rdy; ++ bool sfp_gpio_en; ++ u32 pause_autoneg; ++ bool tx_pause; ++ bool rx_pause; ++}; ++ ++enum netdev_state_t { ++ __LINK_STATE_START = 0, ++ __LINK_STATE_PRESENT = 1, ++ __LINK_STATE_NOCARRIER = 2, ++ __LINK_STATE_LINKWATCH_PENDING = 3, ++ __LINK_STATE_DORMANT = 4, ++}; ++ ++enum xgene_phy_speed { ++ PHY_SPEED_10 = 0, ++ PHY_SPEED_100 = 1, ++ PHY_SPEED_1000 = 2, ++}; ++ ++enum netdev_queue_state_t { ++ __QUEUE_STATE_DRV_XOFF = 0, ++ __QUEUE_STATE_STACK_XOFF = 1, ++ __QUEUE_STATE_FROZEN = 2, ++}; ++ ++enum skb_free_reason { ++ SKB_REASON_CONSUMED = 0, ++ SKB_REASON_DROPPED = 1, ++}; ++ ++enum xgene_enet_buf_len { ++ SIZE_2K = 2048, ++ SIZE_4K = 4096, ++ SIZE_16K = 16384, ++}; ++ ++enum ethtool_stringset { ++ ETH_SS_TEST = 0, ++ ETH_SS_STATS = 1, ++ ETH_SS_PRIV_FLAGS = 2, ++ ETH_SS_NTUPLE_FILTERS = 3, ++ ETH_SS_FEATURES = 4, ++ ETH_SS_RSS_HASH_FUNCS = 5, ++ ETH_SS_TUNABLES = 6, ++ ETH_SS_PHY_STATS = 7, ++ ETH_SS_PHY_TUNABLES = 8, ++}; ++ ++struct xgene_gstrings_stats { ++ char name[32]; ++ int offset; ++ u32 addr; ++ u32 mask; ++}; ++ ++enum xgene_cle_ptree_nodes { ++ PKT_TYPE_NODE = 0, ++ PKT_PROT_NODE = 1, ++ RSS_IPV4_TCP_NODE = 2, ++ RSS_IPV4_UDP_NODE = 3, ++ RSS_IPV4_OTHERS_NODE = 4, ++ LAST_NODE = 5, ++ MAX_NODES = 6, ++}; ++ ++enum xgene_cle_byte_store { ++ NO_BYTE = 0, ++ FIRST_BYTE = 1, ++ SECOND_BYTE = 2, ++ BOTH_BYTES = 3, ++}; ++ ++enum xgene_cle_node_type { ++ INV = 0, ++ KN = 1, ++ EWDN = 2, ++ RES_NODE = 3, ++}; ++ ++enum xgene_cle_op_type { ++ EQT = 0, ++ NEQT = 1, ++ LTEQT = 2, ++ GTEQT = 3, ++ AND = 4, ++ NAND = 5, ++}; ++ ++enum xgene_cle_dram_type { ++ PKT_RAM = 0, ++ RSS_IDT = 1, ++ RSS_IPV4_HASH_SKEY = 2, ++ PTREE_RAM = 12, ++ AVL_RAM = 13, ++ DB_RAM = 14, ++}; ++ ++enum xgene_cle_cmd_type { ++ CLE_CMD_WR = 1, ++ CLE_CMD_RD = 2, ++ CLE_CMD_AVL_ADD = 8, ++ 
CLE_CMD_AVL_DEL = 16, ++ CLE_CMD_AVL_SRCH = 32, ++}; ++ ++enum xgene_cle_ipv4_rss_hashtype { ++ RSS_IPV4_8B = 0, ++ RSS_IPV4_12B = 1, ++}; ++ ++enum xgene_cle_prot_type { ++ XGENE_CLE_TCP = 0, ++ XGENE_CLE_UDP = 1, ++ XGENE_CLE_ESP = 2, ++ XGENE_CLE_OTHER = 3, ++}; ++ ++enum xgene_cle_prot_version { ++ XGENE_CLE_IPV4 = 0, ++}; ++ ++enum xgene_cle_ptree_dbptrs { ++ DB_RES_DROP = 0, ++ DB_RES_DEF = 1, ++ DB_RES_ACCEPT = 2, ++ DB_MAX_PTRS = 3, ++}; ++ ++struct xgene_cle_ptree_branch { ++ bool valid; ++ u16 next_packet_pointer; ++ bool jump_bw; ++ bool jump_rel; ++ u8 operation; ++ u16 next_node; ++ u8 next_branch; ++ u16 data; ++ u16 mask; ++}; ++ ++struct xgene_cle_ptree_ewdn { ++ u8 node_type; ++ bool last_node; ++ bool hdr_len_store; ++ u8 hdr_extn; ++ u8 byte_store; ++ u8 search_byte_store; ++ u16 result_pointer; ++ u8 num_branches; ++ struct xgene_cle_ptree_branch branch[6]; ++}; ++ ++struct ptp_clock_time { ++ __s64 sec; ++ __u32 nsec; ++ __u32 reserved; ++}; ++ ++struct ptp_extts_request { ++ unsigned int index; ++ unsigned int flags; ++ unsigned int rsv[2]; ++}; ++ ++struct ptp_perout_request { ++ struct ptp_clock_time start; ++ struct ptp_clock_time period; ++ unsigned int index; ++ unsigned int flags; ++ unsigned int rsv[4]; ++}; ++ ++enum ptp_pin_function { ++ PTP_PF_NONE = 0, ++ PTP_PF_EXTTS = 1, ++ PTP_PF_PEROUT = 2, ++ PTP_PF_PHYSYNC = 3, ++}; ++ ++struct ptp_pin_desc { ++ char name[64]; ++ unsigned int index; ++ unsigned int func; ++ unsigned int chan; ++ unsigned int rsv[5]; ++}; ++ ++struct ptp_clock_request { ++ enum { ++ PTP_CLK_REQ_EXTTS = 0, ++ PTP_CLK_REQ_PEROUT = 1, ++ PTP_CLK_REQ_PPS = 2, ++ } type; ++ union { ++ struct ptp_extts_request extts; ++ struct ptp_perout_request perout; ++ }; ++}; ++ ++struct ptp_clock_info { ++ struct module *owner; ++ char name[16]; ++ s32 max_adj; ++ int n_alarm; ++ int n_ext_ts; ++ int n_per_out; ++ int n_pins; ++ int pps; ++ struct ptp_pin_desc *pin_config; ++ int (*adjfine)(struct ptp_clock_info *, long int); ++ int (*adjfreq)(struct ptp_clock_info *, s32); ++ int (*adjtime)(struct ptp_clock_info *, s64); ++ int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); ++ int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); ++ int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); ++ int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); ++ int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); ++ long int (*do_aux_work)(struct ptp_clock_info *); ++}; ++ ++struct ptp_clock; ++ ++struct cavium_ptp { ++ struct pci_dev *pdev; ++ spinlock_t spin_lock; ++ struct cyclecounter cycle_counter; ++ struct timecounter time_counter; ++ void *reg_base; ++ u32 clock_rate; ++ struct ptp_clock_info ptp_info; ++ struct ptp_clock *ptp_clock; ++}; ++ ++struct usb_device_id { ++ __u16 match_flags; ++ __u16 idVendor; ++ __u16 idProduct; ++ __u16 bcdDevice_lo; ++ __u16 bcdDevice_hi; ++ __u8 bDeviceClass; ++ __u8 bDeviceSubClass; ++ __u8 bDeviceProtocol; ++ __u8 bInterfaceClass; ++ __u8 bInterfaceSubClass; ++ __u8 bInterfaceProtocol; ++ __u8 bInterfaceNumber; ++ kernel_ulong_t driver_info; ++}; ++ ++struct usb_descriptor_header { ++ __u8 bLength; ++ __u8 bDescriptorType; ++}; ++ ++struct usb_device_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 bcdUSB; ++ __u8 bDeviceClass; ++ __u8 bDeviceSubClass; ++ __u8 bDeviceProtocol; ++ __u8 bMaxPacketSize0; ++ __le16 idVendor; ++ __le16 idProduct; ++ __le16 bcdDevice; ++ __u8 iManufacturer; ++ __u8 
iProduct; ++ __u8 iSerialNumber; ++ __u8 bNumConfigurations; ++}; ++ ++struct usb_config_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 wTotalLength; ++ __u8 bNumInterfaces; ++ __u8 bConfigurationValue; ++ __u8 iConfiguration; ++ __u8 bmAttributes; ++ __u8 bMaxPower; ++} __attribute__((packed)); ++ ++struct usb_interface_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bInterfaceNumber; ++ __u8 bAlternateSetting; ++ __u8 bNumEndpoints; ++ __u8 bInterfaceClass; ++ __u8 bInterfaceSubClass; ++ __u8 bInterfaceProtocol; ++ __u8 iInterface; ++}; ++ ++struct usb_endpoint_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bEndpointAddress; ++ __u8 bmAttributes; ++ __le16 wMaxPacketSize; ++ __u8 bInterval; ++ __u8 bRefresh; ++ __u8 bSynchAddress; ++} __attribute__((packed)); ++ ++struct usb_ssp_isoc_ep_comp_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 wReseved; ++ __le32 dwBytesPerInterval; ++}; ++ ++struct usb_ss_ep_comp_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bMaxBurst; ++ __u8 bmAttributes; ++ __le16 wBytesPerInterval; ++}; ++ ++struct usb_interface_assoc_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bFirstInterface; ++ __u8 bInterfaceCount; ++ __u8 bFunctionClass; ++ __u8 bFunctionSubClass; ++ __u8 bFunctionProtocol; ++ __u8 iFunction; ++}; ++ ++struct usb_bos_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 wTotalLength; ++ __u8 bNumDeviceCaps; ++} __attribute__((packed)); ++ ++struct usb_ext_cap_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++ __le32 bmAttributes; ++} __attribute__((packed)); ++ ++struct usb_ss_cap_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++ __u8 bmAttributes; ++ __le16 wSpeedSupported; ++ __u8 bFunctionalitySupport; ++ __u8 bU1devExitLat; ++ __le16 bU2DevExitLat; ++}; ++ ++struct usb_ss_container_id_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++ __u8 bReserved; ++ __u8 ContainerID[16]; ++}; ++ ++struct usb_ssp_cap_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++ __u8 bReserved; ++ __le32 bmAttributes; ++ __le16 wFunctionalitySupport; ++ __le16 wReserved; ++ __le32 bmSublinkSpeedAttr[1]; ++}; ++ ++struct usb_ptm_cap_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++}; ++ ++enum usb_device_speed { ++ USB_SPEED_UNKNOWN = 0, ++ USB_SPEED_LOW = 1, ++ USB_SPEED_FULL = 2, ++ USB_SPEED_HIGH = 3, ++ USB_SPEED_WIRELESS = 4, ++ USB_SPEED_SUPER = 5, ++ USB_SPEED_SUPER_PLUS = 6, ++}; ++ ++enum usb_device_state { ++ USB_STATE_NOTATTACHED = 0, ++ USB_STATE_ATTACHED = 1, ++ USB_STATE_POWERED = 2, ++ USB_STATE_RECONNECTING = 3, ++ USB_STATE_UNAUTHENTICATED = 4, ++ USB_STATE_DEFAULT = 5, ++ USB_STATE_ADDRESS = 6, ++ USB_STATE_CONFIGURED = 7, ++ USB_STATE_SUSPENDED = 8, ++}; ++ ++enum usb3_link_state { ++ USB3_LPM_U0 = 0, ++ USB3_LPM_U1 = 1, ++ USB3_LPM_U2 = 2, ++ USB3_LPM_U3 = 3, ++}; ++ ++struct ep_device; ++ ++struct usb_host_endpoint { ++ struct usb_endpoint_descriptor desc; ++ struct usb_ss_ep_comp_descriptor ss_ep_comp; ++ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; ++ char: 8; ++ struct list_head urb_list; ++ void *hcpriv; ++ struct ep_device *ep_dev; ++ unsigned char *extra; ++ int extralen; ++ int enabled; ++ int streams; ++ int: 32; ++} __attribute__((packed)); ++ ++struct usb_host_interface { ++ struct usb_interface_descriptor desc; ++ int extralen; ++ unsigned char 
*extra; ++ struct usb_host_endpoint *endpoint; ++ char *string; ++}; ++ ++enum usb_interface_condition { ++ USB_INTERFACE_UNBOUND = 0, ++ USB_INTERFACE_BINDING = 1, ++ USB_INTERFACE_BOUND = 2, ++ USB_INTERFACE_UNBINDING = 3, ++}; ++ ++struct usb_interface { ++ struct usb_host_interface *altsetting; ++ struct usb_host_interface *cur_altsetting; ++ unsigned int num_altsetting; ++ struct usb_interface_assoc_descriptor *intf_assoc; ++ int minor; ++ enum usb_interface_condition condition; ++ unsigned int sysfs_files_created: 1; ++ unsigned int ep_devs_created: 1; ++ unsigned int unregistering: 1; ++ unsigned int needs_remote_wakeup: 1; ++ unsigned int needs_altsetting0: 1; ++ unsigned int needs_binding: 1; ++ unsigned int resetting_device: 1; ++ unsigned int authorized: 1; ++ struct device dev; ++ struct device *usb_dev; ++ struct work_struct reset_ws; ++}; ++ ++struct usb_interface_cache { ++ unsigned int num_altsetting; ++ struct kref ref; ++ struct usb_host_interface altsetting[0]; ++}; ++ ++struct usb_host_config { ++ struct usb_config_descriptor desc; ++ char *string; ++ struct usb_interface_assoc_descriptor *intf_assoc[16]; ++ struct usb_interface *interface[32]; ++ struct usb_interface_cache *intf_cache[32]; ++ unsigned char *extra; ++ int extralen; ++}; ++ ++struct usb_host_bos { ++ struct usb_bos_descriptor *desc; ++ struct usb_ext_cap_descriptor *ext_cap; ++ struct usb_ss_cap_descriptor *ss_cap; ++ struct usb_ssp_cap_descriptor *ssp_cap; ++ struct usb_ss_container_id_descriptor *ss_id; ++ struct usb_ptm_cap_descriptor *ptm_cap; ++}; ++ ++struct usb_devmap { ++ long unsigned int devicemap[2]; ++}; ++ ++struct usb_device; ++ ++struct mon_bus; ++ ++struct usb_bus { ++ struct device *controller; ++ struct device *sysdev; ++ int busnum; ++ const char *bus_name; ++ u8 uses_dma; ++ u8 uses_pio_for_control; ++ u8 otg_port; ++ unsigned int is_b_host: 1; ++ unsigned int b_hnp_enable: 1; ++ unsigned int no_stop_on_short: 1; ++ unsigned int no_sg_constraint: 1; ++ unsigned int sg_tablesize; ++ int devnum_next; ++ struct mutex devnum_next_mutex; ++ struct usb_devmap devmap; ++ struct usb_device *root_hub; ++ struct usb_bus *hs_companion; ++ int bandwidth_allocated; ++ int bandwidth_int_reqs; ++ int bandwidth_isoc_reqs; ++ unsigned int resuming_ports; ++ struct mon_bus *mon_bus; ++ int monitored; ++}; ++ ++struct wusb_dev; ++ ++enum usb_device_removable { ++ USB_DEVICE_REMOVABLE_UNKNOWN = 0, ++ USB_DEVICE_REMOVABLE = 1, ++ USB_DEVICE_FIXED = 2, ++}; ++ ++struct usb2_lpm_parameters { ++ unsigned int besl; ++ int timeout; ++}; ++ ++struct usb3_lpm_parameters { ++ unsigned int mel; ++ unsigned int pel; ++ unsigned int sel; ++ int timeout; ++}; ++ ++struct usb_tt; ++ ++struct usb_device { ++ int devnum; ++ char devpath[16]; ++ u32 route; ++ enum usb_device_state state; ++ enum usb_device_speed speed; ++ unsigned int rx_lanes; ++ unsigned int tx_lanes; ++ struct usb_tt *tt; ++ int ttport; ++ unsigned int toggle[2]; ++ struct usb_device *parent; ++ struct usb_bus *bus; ++ struct usb_host_endpoint ep0; ++ struct device dev; ++ struct usb_device_descriptor descriptor; ++ struct usb_host_bos *bos; ++ struct usb_host_config *config; ++ struct usb_host_config *actconfig; ++ struct usb_host_endpoint *ep_in[16]; ++ struct usb_host_endpoint *ep_out[16]; ++ char **rawdescriptors; ++ short unsigned int bus_mA; ++ u8 portnum; ++ u8 level; ++ unsigned int can_submit: 1; ++ unsigned int persist_enabled: 1; ++ unsigned int have_langid: 1; ++ unsigned int authorized: 1; ++ unsigned int authenticated: 1; ++ unsigned int 
wusb: 1; ++ unsigned int lpm_capable: 1; ++ unsigned int usb2_hw_lpm_capable: 1; ++ unsigned int usb2_hw_lpm_besl_capable: 1; ++ unsigned int usb2_hw_lpm_enabled: 1; ++ unsigned int usb2_hw_lpm_allowed: 1; ++ unsigned int usb3_lpm_u1_enabled: 1; ++ unsigned int usb3_lpm_u2_enabled: 1; ++ int string_langid; ++ char *product; ++ char *manufacturer; ++ char *serial; ++ struct list_head filelist; ++ int maxchild; ++ u32 quirks; ++ atomic_t urbnum; ++ long unsigned int active_duration; ++ long unsigned int connect_time; ++ unsigned int do_remote_wakeup: 1; ++ unsigned int reset_resume: 1; ++ unsigned int port_is_suspended: 1; ++ struct wusb_dev *wusb_dev; ++ int slot_id; ++ enum usb_device_removable removable; ++ struct usb2_lpm_parameters l1_params; ++ struct usb3_lpm_parameters u1_params; ++ struct usb3_lpm_parameters u2_params; ++ unsigned int lpm_disable_count; ++ u16 hub_delay; ++}; ++ ++struct usb_tt { ++ struct usb_device *hub; ++ int multi; ++ unsigned int think_time; ++ void *hcpriv; ++ spinlock_t lock; ++ struct list_head clear_list; ++ struct work_struct clear_work; ++}; ++ ++struct usb_dynids { ++ spinlock_t lock; ++ struct list_head list; ++}; ++ ++struct usbdrv_wrap { ++ struct device_driver driver; ++ int for_devices; ++}; ++ ++struct usb_driver { ++ const char *name; ++ int (*probe)(struct usb_interface *, const struct usb_device_id *); ++ void (*disconnect)(struct usb_interface *); ++ int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); ++ int (*suspend)(struct usb_interface *, pm_message_t); ++ int (*resume)(struct usb_interface *); ++ int (*reset_resume)(struct usb_interface *); ++ int (*pre_reset)(struct usb_interface *); ++ int (*post_reset)(struct usb_interface *); ++ const struct usb_device_id *id_table; ++ struct usb_dynids dynids; ++ struct usbdrv_wrap drvwrap; ++ unsigned int no_dynamic_id: 1; ++ unsigned int supports_autosuspend: 1; ++ unsigned int disable_hub_initiated_lpm: 1; ++ unsigned int soft_unbind: 1; ++}; ++ ++struct usb_device_driver { ++ const char *name; ++ int (*probe)(struct usb_device *); ++ void (*disconnect)(struct usb_device *); ++ int (*suspend)(struct usb_device *, pm_message_t); ++ int (*resume)(struct usb_device *, pm_message_t); ++ struct usbdrv_wrap drvwrap; ++ unsigned int supports_autosuspend: 1; ++}; ++ ++struct usb_iso_packet_descriptor { ++ unsigned int offset; ++ unsigned int length; ++ unsigned int actual_length; ++ int status; ++}; ++ ++struct usb_anchor { ++ struct list_head urb_list; ++ wait_queue_head_t wait; ++ spinlock_t lock; ++ atomic_t suspend_wakeups; ++ unsigned int poisoned: 1; ++}; ++ ++struct urb; ++ ++typedef void (*usb_complete_t)(struct urb *); ++ ++struct urb { ++ struct kref kref; ++ void *hcpriv; ++ atomic_t use_count; ++ atomic_t reject; ++ int unlinked; ++ struct list_head urb_list; ++ struct list_head anchor_list; ++ struct usb_anchor *anchor; ++ struct usb_device *dev; ++ struct usb_host_endpoint *ep; ++ unsigned int pipe; ++ unsigned int stream_id; ++ int status; ++ unsigned int transfer_flags; ++ void *transfer_buffer; ++ dma_addr_t transfer_dma; ++ struct scatterlist *sg; ++ int num_mapped_sgs; ++ int num_sgs; ++ u32 transfer_buffer_length; ++ u32 actual_length; ++ unsigned char *setup_packet; ++ dma_addr_t setup_dma; ++ int start_frame; ++ int number_of_packets; ++ int interval; ++ int error_count; ++ void *context; ++ usb_complete_t complete; ++ struct usb_iso_packet_descriptor iso_frame_desc[0]; ++}; ++ ++struct giveback_urb_bh { ++ bool running; ++ spinlock_t lock; ++ struct list_head head; 
++ struct tasklet_struct bh; ++ struct usb_host_endpoint *completing_ep; ++}; ++ ++struct usb_phy_roothub; ++ ++struct dma_pool___3; ++ ++struct hc_driver; ++ ++struct usb_phy; ++ ++struct usb_hcd { ++ struct usb_bus self; ++ struct kref kref; ++ const char *product_desc; ++ int speed; ++ char irq_descr[24]; ++ struct timer_list rh_timer; ++ struct urb *status_urb; ++ struct work_struct wakeup_work; ++ const struct hc_driver *driver; ++ struct usb_phy *usb_phy; ++ struct usb_phy_roothub *phy_roothub; ++ long unsigned int flags; ++ unsigned int rh_registered: 1; ++ unsigned int rh_pollable: 1; ++ unsigned int msix_enabled: 1; ++ unsigned int msi_enabled: 1; ++ unsigned int skip_phy_initialization: 1; ++ unsigned int uses_new_polling: 1; ++ unsigned int wireless: 1; ++ unsigned int has_tt: 1; ++ unsigned int amd_resume_bug: 1; ++ unsigned int can_do_streams: 1; ++ unsigned int tpl_support: 1; ++ unsigned int cant_recv_wakeups: 1; ++ unsigned int irq; ++ void *regs; ++ resource_size_t rsrc_start; ++ resource_size_t rsrc_len; ++ unsigned int power_budget; ++ struct giveback_urb_bh high_prio_bh; ++ struct giveback_urb_bh low_prio_bh; ++ struct mutex *address0_mutex; ++ struct mutex *bandwidth_mutex; ++ struct usb_hcd *shared_hcd; ++ struct usb_hcd *primary_hcd; ++ struct dma_pool___3 *pool[4]; ++ int state; ++ long unsigned int hcd_priv[0]; ++}; ++ ++struct hc_driver { ++ const char *description; ++ const char *product_desc; ++ size_t hcd_priv_size; ++ irqreturn_t (*irq)(struct usb_hcd *); ++ int flags; ++ int (*reset)(struct usb_hcd *); ++ int (*start)(struct usb_hcd *); ++ int (*pci_suspend)(struct usb_hcd *, bool); ++ int (*pci_resume)(struct usb_hcd *, bool); ++ void (*stop)(struct usb_hcd *); ++ void (*shutdown)(struct usb_hcd *); ++ int (*get_frame_number)(struct usb_hcd *); ++ int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); ++ int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); ++ int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); ++ void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); ++ void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); ++ void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); ++ int (*hub_status_data)(struct usb_hcd *, char *); ++ int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); ++ int (*bus_suspend)(struct usb_hcd *); ++ int (*bus_resume)(struct usb_hcd *); ++ int (*start_port_reset)(struct usb_hcd *, unsigned int); ++ long unsigned int (*get_resuming_ports)(struct usb_hcd *); ++ void (*relinquish_port)(struct usb_hcd *, int); ++ int (*port_handed_over)(struct usb_hcd *, int); ++ void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); ++ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); ++ void (*free_dev)(struct usb_hcd *, struct usb_device *); ++ int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); ++ int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); ++ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); ++ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); ++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); ++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); ++ int (*address_device)(struct usb_hcd *, struct usb_device *); ++ int (*enable_device)(struct usb_hcd *, struct usb_device *); ++ int 
(*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); ++ int (*reset_device)(struct usb_hcd *, struct usb_device *); ++ int (*update_device)(struct usb_hcd *, struct usb_device *); ++ int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); ++ int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); ++ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); ++ int (*find_raw_port_number)(struct usb_hcd *, int); ++ int (*port_power)(struct usb_hcd *, int, bool); ++}; ++ ++enum usb_phy_type { ++ USB_PHY_TYPE_UNDEFINED = 0, ++ USB_PHY_TYPE_USB2 = 1, ++ USB_PHY_TYPE_USB3 = 2, ++}; ++ ++enum usb_phy_events { ++ USB_EVENT_NONE = 0, ++ USB_EVENT_VBUS = 1, ++ USB_EVENT_ID = 2, ++ USB_EVENT_CHARGER = 3, ++ USB_EVENT_ENUMERATED = 4, ++}; ++ ++struct extcon_dev; ++ ++enum usb_charger_type { ++ UNKNOWN_TYPE = 0, ++ SDP_TYPE = 1, ++ DCP_TYPE = 2, ++ CDP_TYPE = 3, ++ ACA_TYPE = 4, ++}; ++ ++enum usb_charger_state { ++ USB_CHARGER_DEFAULT = 0, ++ USB_CHARGER_PRESENT = 1, ++ USB_CHARGER_ABSENT = 2, ++}; ++ ++struct usb_charger_current { ++ unsigned int sdp_min; ++ unsigned int sdp_max; ++ unsigned int dcp_min; ++ unsigned int dcp_max; ++ unsigned int cdp_min; ++ unsigned int cdp_max; ++ unsigned int aca_min; ++ unsigned int aca_max; ++}; ++ ++struct usb_otg; ++ ++struct usb_phy_io_ops; ++ ++struct usb_phy { ++ struct device *dev; ++ const char *label; ++ unsigned int flags; ++ enum usb_phy_type type; ++ enum usb_phy_events last_event; ++ struct usb_otg *otg; ++ struct device *io_dev; ++ struct usb_phy_io_ops *io_ops; ++ void *io_priv; ++ struct extcon_dev *edev; ++ struct extcon_dev *id_edev; ++ struct notifier_block vbus_nb; ++ struct notifier_block id_nb; ++ struct notifier_block type_nb; ++ enum usb_charger_type chg_type; ++ enum usb_charger_state chg_state; ++ struct usb_charger_current chg_cur; ++ struct work_struct chg_work; ++ struct atomic_notifier_head notifier; ++ u16 port_status; ++ u16 port_change; ++ struct list_head head; ++ int (*init)(struct usb_phy *); ++ void (*shutdown)(struct usb_phy *); ++ int (*set_vbus)(struct usb_phy *, int); ++ int (*set_power)(struct usb_phy *, unsigned int); ++ int (*set_suspend)(struct usb_phy *, int); ++ int (*set_wakeup)(struct usb_phy *, bool); ++ int (*notify_connect)(struct usb_phy *, enum usb_device_speed); ++ int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); ++ enum usb_charger_type (*charger_detect)(struct usb_phy *); ++}; ++ ++struct usb_mon_operations { ++ void (*urb_submit)(struct usb_bus *, struct urb *); ++ void (*urb_submit_error)(struct usb_bus *, struct urb *, int); ++ void (*urb_complete)(struct usb_bus *, struct urb *, int); ++}; ++ ++enum usb_otg_state { ++ OTG_STATE_UNDEFINED = 0, ++ OTG_STATE_B_IDLE = 1, ++ OTG_STATE_B_SRP_INIT = 2, ++ OTG_STATE_B_PERIPHERAL = 3, ++ OTG_STATE_B_WAIT_ACON = 4, ++ OTG_STATE_B_HOST = 5, ++ OTG_STATE_A_IDLE = 6, ++ OTG_STATE_A_WAIT_VRISE = 7, ++ OTG_STATE_A_WAIT_BCON = 8, ++ OTG_STATE_A_HOST = 9, ++ OTG_STATE_A_SUSPEND = 10, ++ OTG_STATE_A_PERIPHERAL = 11, ++ OTG_STATE_A_WAIT_VFALL = 12, ++ OTG_STATE_A_VBUS_ERR = 13, ++}; ++ ++struct usb_phy_io_ops { ++ int (*read)(struct usb_phy *, u32); ++ int (*write)(struct usb_phy *, u32, u32); ++}; ++ ++struct usb_gadget; ++ ++struct usb_otg { ++ u8 default_a; ++ struct phy *phy; ++ struct usb_phy *usb_phy; ++ struct usb_bus *host; ++ struct usb_gadget *gadget; ++ enum usb_otg_state state; ++ int (*set_host)(struct usb_otg *, struct usb_bus 
*); ++ int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); ++ int (*set_vbus)(struct usb_otg *, bool); ++ int (*start_srp)(struct usb_otg *); ++ int (*start_hnp)(struct usb_otg *); ++}; ++ ++struct find_interface_arg { ++ int minor; ++ struct device_driver *drv; ++}; ++ ++struct each_dev_arg { ++ void *data; ++ int (*fn)(struct usb_device *, void *); ++}; ++ ++struct usb_qualifier_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 bcdUSB; ++ __u8 bDeviceClass; ++ __u8 bDeviceSubClass; ++ __u8 bDeviceProtocol; ++ __u8 bMaxPacketSize0; ++ __u8 bNumConfigurations; ++ __u8 bRESERVED; ++}; ++ ++struct usb_set_sel_req { ++ __u8 u1_sel; ++ __u8 u1_pel; ++ __le16 u2_sel; ++ __le16 u2_pel; ++}; ++ ++enum usb_port_connect_type { ++ USB_PORT_CONNECT_TYPE_UNKNOWN = 0, ++ USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, ++ USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, ++ USB_PORT_NOT_USED = 3, ++}; ++ ++struct usbdevfs_hub_portinfo { ++ char nports; ++ char port[127]; ++}; ++ ++struct usb_port_status { ++ __le16 wPortStatus; ++ __le16 wPortChange; ++ __le32 dwExtPortStatus; ++}; ++ ++struct usb_hub_status { ++ __le16 wHubStatus; ++ __le16 wHubChange; ++}; ++ ++struct usb_hub_descriptor { ++ __u8 bDescLength; ++ __u8 bDescriptorType; ++ __u8 bNbrPorts; ++ __le16 wHubCharacteristics; ++ __u8 bPwrOn2PwrGood; ++ __u8 bHubContrCurrent; ++ union { ++ struct { ++ __u8 DeviceRemovable[4]; ++ __u8 PortPwrCtrlMask[4]; ++ } hs; ++ struct { ++ __u8 bHubHdrDecLat; ++ __le16 wHubDelay; ++ __le16 DeviceRemovable; ++ } __attribute__((packed)) ss; ++ } u; ++} __attribute__((packed)); ++ ++enum hub_led_mode { ++ INDICATOR_AUTO = 0, ++ INDICATOR_CYCLE = 1, ++ INDICATOR_GREEN_BLINK = 2, ++ INDICATOR_GREEN_BLINK_OFF = 3, ++ INDICATOR_AMBER_BLINK = 4, ++ INDICATOR_AMBER_BLINK_OFF = 5, ++ INDICATOR_ALT_BLINK = 6, ++ INDICATOR_ALT_BLINK_OFF = 7, ++}; ++ ++struct usb_tt_clear { ++ struct list_head clear_list; ++ unsigned int tt; ++ u16 devinfo; ++ struct usb_hcd *hcd; ++ struct usb_host_endpoint *ep; ++}; ++ ++typedef u32 usb_port_location_t; ++ ++struct usb_port; ++ ++struct usb_hub { ++ struct device *intfdev; ++ struct usb_device *hdev; ++ struct kref kref; ++ struct urb *urb; ++ u8 (*buffer)[8]; ++ union { ++ struct usb_hub_status hub; ++ struct usb_port_status port; ++ } *status; ++ struct mutex status_mutex; ++ int error; ++ int nerrors; ++ long unsigned int event_bits[1]; ++ long unsigned int change_bits[1]; ++ long unsigned int removed_bits[1]; ++ long unsigned int wakeup_bits[1]; ++ long unsigned int power_bits[1]; ++ long unsigned int child_usage_bits[1]; ++ long unsigned int warm_reset_bits[1]; ++ struct usb_hub_descriptor *descriptor; ++ struct usb_tt tt; ++ unsigned int mA_per_port; ++ unsigned int wakeup_enabled_descendants; ++ unsigned int limited_power: 1; ++ unsigned int quiescing: 1; ++ unsigned int disconnected: 1; ++ unsigned int in_reset: 1; ++ unsigned int quirk_check_port_auto_suspend: 1; ++ unsigned int has_indicators: 1; ++ u8 indicator[31]; ++ struct delayed_work leds; ++ struct delayed_work init_work; ++ struct work_struct events; ++ struct usb_port **ports; ++}; ++ ++struct usb_dev_state; ++ ++struct usb_port { ++ struct usb_device *child; ++ struct device dev; ++ struct usb_dev_state *port_owner; ++ struct usb_port *peer; ++ struct dev_pm_qos_request *req; ++ enum usb_port_connect_type connect_type; ++ usb_port_location_t location; ++ struct mutex status_lock; ++ u32 over_current_count; ++ u8 portnum; ++ u32 quirks; ++ unsigned int is_superspeed: 1; ++ unsigned int usb3_lpm_u1_permit: 1; ++ 
unsigned int usb3_lpm_u2_permit: 1; ++}; ++ ++enum hub_activation_type { ++ HUB_INIT = 0, ++ HUB_INIT2 = 1, ++ HUB_INIT3 = 2, ++ HUB_POST_RESET = 3, ++ HUB_RESUME = 4, ++ HUB_RESET_RESUME = 5, ++}; ++ ++enum hub_quiescing_type { ++ HUB_DISCONNECT = 0, ++ HUB_PRE_RESET = 1, ++ HUB_SUSPEND = 2, ++}; ++ ++struct usb_ctrlrequest { ++ __u8 bRequestType; ++ __u8 bRequest; ++ __le16 wValue; ++ __le16 wIndex; ++ __le16 wLength; ++}; ++ ++enum usb_led_event { ++ USB_LED_EVENT_HOST = 0, ++ USB_LED_EVENT_GADGET = 1, ++}; ++ ++struct usb_sg_request { ++ int status; ++ size_t bytes; ++ spinlock_t lock; ++ struct usb_device *dev; ++ int pipe; ++ int entries; ++ struct urb **urbs; ++ int count; ++ struct completion complete; ++}; ++ ++struct usb_cdc_header_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdCDC; ++} __attribute__((packed)); ++ ++struct usb_cdc_call_mgmt_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 bmCapabilities; ++ __u8 bDataInterface; ++}; ++ ++struct usb_cdc_acm_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 bmCapabilities; ++}; ++ ++struct usb_cdc_union_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 bMasterInterface0; ++ __u8 bSlaveInterface0; ++}; ++ ++struct usb_cdc_country_functional_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 iCountryCodeRelDate; ++ __le16 wCountyCode0; ++}; ++ ++struct usb_cdc_network_terminal_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 bEntityId; ++ __u8 iName; ++ __u8 bChannelIndex; ++ __u8 bPhysicalInterface; ++}; ++ ++struct usb_cdc_ether_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 iMACAddress; ++ __le32 bmEthernetStatistics; ++ __le16 wMaxSegmentSize; ++ __le16 wNumberMCFilters; ++ __u8 bNumberPowerFilters; ++} __attribute__((packed)); ++ ++struct usb_cdc_dmm_desc { ++ __u8 bFunctionLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubtype; ++ __u16 bcdVersion; ++ __le16 wMaxCommand; ++} __attribute__((packed)); ++ ++struct usb_cdc_mdlm_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdVersion; ++ __u8 bGUID[16]; ++} __attribute__((packed)); ++ ++struct usb_cdc_mdlm_detail_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __u8 bGuidDescriptorType; ++ __u8 bDetailData[0]; ++}; ++ ++struct usb_cdc_obex_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdVersion; ++} __attribute__((packed)); ++ ++struct usb_cdc_ncm_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdNcmVersion; ++ __u8 bmNetworkCapabilities; ++} __attribute__((packed)); ++ ++struct usb_cdc_mbim_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdMBIMVersion; ++ __le16 wMaxControlMessage; ++ __u8 bNumberFilters; ++ __u8 bMaxFilterSize; ++ __le16 wMaxSegmentSize; ++ __u8 bmNetworkCapabilities; ++} __attribute__((packed)); ++ ++struct usb_cdc_mbim_extended_desc { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDescriptorSubType; ++ __le16 bcdMBIMExtendedVersion; ++ __u8 bMaxOutstandingCommandMessages; ++ __le16 wMTU; ++} __attribute__((packed)); ++ ++struct usb_cdc_parsed_header { ++ struct usb_cdc_union_desc *usb_cdc_union_desc; ++ struct usb_cdc_header_desc *usb_cdc_header_desc; ++ struct usb_cdc_call_mgmt_descriptor 
*usb_cdc_call_mgmt_descriptor; ++ struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; ++ struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; ++ struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; ++ struct usb_cdc_ether_desc *usb_cdc_ether_desc; ++ struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; ++ struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; ++ struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; ++ struct usb_cdc_obex_desc *usb_cdc_obex_desc; ++ struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; ++ struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; ++ struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; ++ bool phonet_magic_present; ++}; ++ ++struct api_context { ++ struct completion done; ++ int status; ++}; ++ ++struct set_config_request { ++ struct usb_device *udev; ++ int config; ++ struct work_struct work; ++ struct list_head node; ++}; ++ ++struct usb_dynid { ++ struct list_head node; ++ struct usb_device_id id; ++}; ++ ++struct usb_dev_cap_header { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ __u8 bDevCapabilityType; ++}; ++ ++struct usb_class_driver { ++ char *name; ++ char * (*devnode)(struct device *, umode_t *); ++ const struct file_operations *fops; ++ int minor_base; ++}; ++ ++struct usb_class { ++ struct kref kref; ++ struct class *class; ++}; ++ ++struct ep_device { ++ struct usb_endpoint_descriptor *desc; ++ struct usb_device *udev; ++ struct device dev; ++}; ++ ++struct usbdevfs_ctrltransfer { ++ __u8 bRequestType; ++ __u8 bRequest; ++ __u16 wValue; ++ __u16 wIndex; ++ __u16 wLength; ++ __u32 timeout; ++ void *data; ++}; ++ ++struct usbdevfs_bulktransfer { ++ unsigned int ep; ++ unsigned int len; ++ unsigned int timeout; ++ void *data; ++}; ++ ++struct usbdevfs_setinterface { ++ unsigned int interface; ++ unsigned int altsetting; ++}; ++ ++struct usbdevfs_disconnectsignal { ++ unsigned int signr; ++ void *context; ++}; ++ ++struct usbdevfs_getdriver { ++ unsigned int interface; ++ char driver[256]; ++}; ++ ++struct usbdevfs_connectinfo { ++ unsigned int devnum; ++ unsigned char slow; ++}; ++ ++struct usbdevfs_iso_packet_desc { ++ unsigned int length; ++ unsigned int actual_length; ++ unsigned int status; ++}; ++ ++struct usbdevfs_urb { ++ unsigned char type; ++ unsigned char endpoint; ++ int status; ++ unsigned int flags; ++ void *buffer; ++ int buffer_length; ++ int actual_length; ++ int start_frame; ++ union { ++ int number_of_packets; ++ unsigned int stream_id; ++ }; ++ int error_count; ++ unsigned int signr; ++ void *usercontext; ++ struct usbdevfs_iso_packet_desc iso_frame_desc[0]; ++}; ++ ++struct usbdevfs_ioctl { ++ int ifno; ++ int ioctl_code; ++ void *data; ++}; ++ ++struct usbdevfs_disconnect_claim { ++ unsigned int interface; ++ unsigned int flags; ++ char driver[256]; ++}; ++ ++struct usbdevfs_streams { ++ unsigned int num_streams; ++ unsigned int num_eps; ++ unsigned char eps[0]; ++}; ++ ++struct usbdevfs_ctrltransfer32 { ++ u8 bRequestType; ++ u8 bRequest; ++ u16 wValue; ++ u16 wIndex; ++ u16 wLength; ++ u32 timeout; ++ compat_caddr_t data; ++}; ++ ++struct usbdevfs_bulktransfer32 { ++ compat_uint_t ep; ++ compat_uint_t len; ++ compat_uint_t timeout; ++ compat_caddr_t data; ++}; ++ ++struct usbdevfs_disconnectsignal32 { ++ compat_int_t signr; ++ compat_caddr_t context; ++}; ++ ++struct usbdevfs_urb32 { ++ unsigned char type; ++ unsigned char endpoint; ++ compat_int_t status; ++ compat_uint_t flags; ++ compat_caddr_t buffer; ++ compat_int_t buffer_length; ++ compat_int_t actual_length; ++ compat_int_t start_frame; ++ 
compat_int_t number_of_packets; ++ compat_int_t error_count; ++ compat_uint_t signr; ++ compat_caddr_t usercontext; ++ struct usbdevfs_iso_packet_desc iso_frame_desc[0]; ++}; ++ ++struct usbdevfs_ioctl32 { ++ s32 ifno; ++ s32 ioctl_code; ++ compat_caddr_t data; ++}; ++ ++struct usb_dev_state___2 { ++ struct list_head list; ++ struct usb_device *dev; ++ struct file *file; ++ spinlock_t lock; ++ struct list_head async_pending; ++ struct list_head async_completed; ++ struct list_head memory_list; ++ wait_queue_head_t wait; ++ unsigned int discsignr; ++ struct pid *disc_pid; ++ const struct cred *cred; ++ void *disccontext; ++ long unsigned int ifclaimed; ++ u32 disabled_bulk_eps; ++ bool privileges_dropped; ++ long unsigned int interface_allowed_mask; ++}; ++ ++struct usb_memory { ++ struct list_head memlist; ++ int vma_use_count; ++ int urb_use_count; ++ u32 size; ++ void *mem; ++ dma_addr_t dma_handle; ++ long unsigned int vm_start; ++ struct usb_dev_state___2 *ps; ++}; ++ ++struct async { ++ struct list_head asynclist; ++ struct usb_dev_state___2 *ps; ++ struct pid *pid; ++ const struct cred *cred; ++ unsigned int signr; ++ unsigned int ifnum; ++ void *userbuffer; ++ void *userurb; ++ struct urb *urb; ++ struct usb_memory *usbm; ++ unsigned int mem_usage; ++ int status; ++ u8 bulk_addr; ++ u8 bulk_status; ++}; ++ ++enum snoop_when { ++ SUBMIT = 0, ++ COMPLETE = 1, ++}; ++ ++struct quirk_entry { ++ u16 vid; ++ u16 pid; ++ u32 flags; ++}; ++ ++struct device_connect_event { ++ atomic_t count; ++ wait_queue_head_t wait; ++}; ++ ++struct class_info { ++ int class; ++ char *class_name; ++}; ++ ++struct usb_phy_roothub___2 { ++ struct phy *phy; ++ struct list_head list; ++}; ++ ++typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); ++ ++enum usb_phy_interface { ++ USBPHY_INTERFACE_MODE_UNKNOWN = 0, ++ USBPHY_INTERFACE_MODE_UTMI = 1, ++ USBPHY_INTERFACE_MODE_UTMIW = 2, ++ USBPHY_INTERFACE_MODE_ULPI = 3, ++ USBPHY_INTERFACE_MODE_SERIAL = 4, ++ USBPHY_INTERFACE_MODE_HSIC = 5, ++}; ++ ++struct mon_bus { ++ struct list_head bus_link; ++ spinlock_t lock; ++ struct usb_bus *u_bus; ++ int text_inited; ++ int bin_inited; ++ struct dentry *dent_s; ++ struct dentry *dent_t; ++ struct dentry *dent_u; ++ struct device *classdev; ++ int nreaders; ++ struct list_head r_list; ++ struct kref ref; ++ unsigned int cnt_events; ++ unsigned int cnt_text_lost; ++}; ++ ++struct mon_reader { ++ struct list_head r_link; ++ struct mon_bus *m_bus; ++ void *r_data; ++ void (*rnf_submit)(void *, struct urb *); ++ void (*rnf_error)(void *, struct urb *, int); ++ void (*rnf_complete)(void *, struct urb *, int); ++}; ++ ++struct snap { ++ int slen; ++ char str[80]; ++}; ++ ++struct mon_iso_desc { ++ int status; ++ unsigned int offset; ++ unsigned int length; ++}; ++ ++struct mon_event_text { ++ struct list_head e_link; ++ int type; ++ long unsigned int id; ++ unsigned int tstamp; ++ int busnum; ++ char devnum; ++ char epnum; ++ char is_in; ++ char xfertype; ++ int length; ++ int status; ++ int interval; ++ int start_frame; ++ int error_count; ++ char setup_flag; ++ char data_flag; ++ int numdesc; ++ struct mon_iso_desc isodesc[5]; ++ unsigned char setup[8]; ++ unsigned char data[32]; ++}; ++ ++struct mon_reader_text { ++ struct kmem_cache *e_slab; ++ int nevents; ++ struct list_head e_list; ++ struct mon_reader r; ++ wait_queue_head_t wait; ++ int printf_size; ++ size_t printf_offset; ++ size_t printf_togo; ++ char *printf_buf; ++ struct mutex printf_lock; ++ char slab_name[30]; 
++}; ++ ++struct mon_text_ptr { ++ int cnt; ++ int limit; ++ char *pbuf; ++}; ++ ++enum { ++ NAMESZ = 10, ++}; ++ ++struct iso_rec { ++ int error_count; ++ int numdesc; ++}; ++ ++struct mon_bin_hdr { ++ u64 id; ++ unsigned char type; ++ unsigned char xfer_type; ++ unsigned char epnum; ++ unsigned char devnum; ++ short unsigned int busnum; ++ char flag_setup; ++ char flag_data; ++ s64 ts_sec; ++ s32 ts_usec; ++ int status; ++ unsigned int len_urb; ++ unsigned int len_cap; ++ union { ++ unsigned char setup[8]; ++ struct iso_rec iso; ++ } s; ++ int interval; ++ int start_frame; ++ unsigned int xfer_flags; ++ unsigned int ndesc; ++}; ++ ++struct mon_bin_isodesc { ++ int iso_status; ++ unsigned int iso_off; ++ unsigned int iso_len; ++ u32 _pad; ++}; ++ ++struct mon_bin_stats { ++ u32 queued; ++ u32 dropped; ++}; ++ ++struct mon_bin_get { ++ struct mon_bin_hdr *hdr; ++ void *data; ++ size_t alloc; ++}; ++ ++struct mon_bin_mfetch { ++ u32 *offvec; ++ u32 nfetch; ++ u32 nflush; ++}; ++ ++struct mon_bin_get32 { ++ u32 hdr32; ++ u32 data32; ++ u32 alloc32; ++}; ++ ++struct mon_bin_mfetch32 { ++ u32 offvec32; ++ u32 nfetch32; ++ u32 nflush32; ++}; ++ ++struct mon_pgmap { ++ struct page *pg; ++ unsigned char *ptr; ++}; ++ ++struct mon_reader_bin { ++ spinlock_t b_lock; ++ unsigned int b_size; ++ unsigned int b_cnt; ++ unsigned int b_in; ++ unsigned int b_out; ++ unsigned int b_read; ++ struct mon_pgmap *b_vec; ++ wait_queue_head_t b_wait; ++ struct mutex fetch_lock; ++ int mmap_active; ++ struct mon_reader r; ++ unsigned int cnt_lost; ++}; ++ ++enum amd_chipset_gen { ++ NOT_AMD_CHIPSET = 0, ++ AMD_CHIPSET_SB600 = 1, ++ AMD_CHIPSET_SB700 = 2, ++ AMD_CHIPSET_SB800 = 3, ++ AMD_CHIPSET_HUDSON2 = 4, ++ AMD_CHIPSET_BOLTON = 5, ++ AMD_CHIPSET_YANGTZE = 6, ++ AMD_CHIPSET_TAISHAN = 7, ++ AMD_CHIPSET_UNKNOWN = 8, ++}; ++ ++struct amd_chipset_type { ++ enum amd_chipset_gen gen; ++ u8 rev; ++}; ++ ++struct amd_chipset_info { ++ struct pci_dev *nb_dev; ++ struct pci_dev *smbus_dev; ++ int nb_type; ++ struct amd_chipset_type sb_type; ++ int isoc_reqs; ++ int probe_count; ++ int probe_result; ++}; ++ ++struct ehci_stats { ++ long unsigned int normal; ++ long unsigned int error; ++ long unsigned int iaa; ++ long unsigned int lost_iaa; ++ long unsigned int complete; ++ long unsigned int unlink; ++}; ++ ++struct ehci_per_sched { ++ struct usb_device *udev; ++ struct usb_host_endpoint *ep; ++ struct list_head ps_list; ++ u16 tt_usecs; ++ u16 cs_mask; ++ u16 period; ++ u16 phase; ++ u8 bw_phase; ++ u8 phase_uf; ++ u8 usecs; ++ u8 c_usecs; ++ u8 bw_uperiod; ++ u8 bw_period; ++}; ++ ++enum ehci_rh_state { ++ EHCI_RH_HALTED = 0, ++ EHCI_RH_SUSPENDED = 1, ++ EHCI_RH_RUNNING = 2, ++ EHCI_RH_STOPPING = 3, ++}; ++ ++enum ehci_hrtimer_event { ++ EHCI_HRTIMER_POLL_ASS = 0, ++ EHCI_HRTIMER_POLL_PSS = 1, ++ EHCI_HRTIMER_POLL_DEAD = 2, ++ EHCI_HRTIMER_UNLINK_INTR = 3, ++ EHCI_HRTIMER_FREE_ITDS = 4, ++ EHCI_HRTIMER_ACTIVE_UNLINK = 5, ++ EHCI_HRTIMER_START_UNLINK_INTR = 6, ++ EHCI_HRTIMER_ASYNC_UNLINKS = 7, ++ EHCI_HRTIMER_IAA_WATCHDOG = 8, ++ EHCI_HRTIMER_DISABLE_PERIODIC = 9, ++ EHCI_HRTIMER_DISABLE_ASYNC = 10, ++ EHCI_HRTIMER_IO_WATCHDOG = 11, ++ EHCI_HRTIMER_NUM_EVENTS = 12, ++}; ++ ++struct ehci_caps; ++ ++struct ehci_regs; ++ ++struct ehci_dbg_port; ++ ++struct ehci_qh; ++ ++union ehci_shadow; ++ ++struct ehci_itd; ++ ++struct ehci_sitd; ++ ++struct ehci_hcd { ++ enum ehci_hrtimer_event next_hrtimer_event; ++ unsigned int enabled_hrtimer_events; ++ ktime_t hr_timeouts[12]; ++ struct hrtimer hrtimer; ++ int PSS_poll_count; ++ int 
ASS_poll_count; ++ int died_poll_count; ++ struct ehci_caps *caps; ++ struct ehci_regs *regs; ++ struct ehci_dbg_port *debug; ++ __u32 hcs_params; ++ spinlock_t lock; ++ enum ehci_rh_state rh_state; ++ bool scanning: 1; ++ bool need_rescan: 1; ++ bool intr_unlinking: 1; ++ bool iaa_in_progress: 1; ++ bool async_unlinking: 1; ++ bool shutdown: 1; ++ struct ehci_qh *qh_scan_next; ++ struct ehci_qh *async; ++ struct ehci_qh *dummy; ++ struct list_head async_unlink; ++ struct list_head async_idle; ++ unsigned int async_unlink_cycle; ++ unsigned int async_count; ++ __le32 old_current; ++ __le32 old_token; ++ unsigned int periodic_size; ++ __le32 *periodic; ++ dma_addr_t periodic_dma; ++ struct list_head intr_qh_list; ++ unsigned int i_thresh; ++ union ehci_shadow *pshadow; ++ struct list_head intr_unlink_wait; ++ struct list_head intr_unlink; ++ unsigned int intr_unlink_wait_cycle; ++ unsigned int intr_unlink_cycle; ++ unsigned int now_frame; ++ unsigned int last_iso_frame; ++ unsigned int intr_count; ++ unsigned int isoc_count; ++ unsigned int periodic_count; ++ unsigned int uframe_periodic_max; ++ struct list_head cached_itd_list; ++ struct ehci_itd *last_itd_to_free; ++ struct list_head cached_sitd_list; ++ struct ehci_sitd *last_sitd_to_free; ++ long unsigned int reset_done[15]; ++ long unsigned int bus_suspended; ++ long unsigned int companion_ports; ++ long unsigned int owned_ports; ++ long unsigned int port_c_suspend; ++ long unsigned int suspended_ports; ++ long unsigned int resuming_ports; ++ struct dma_pool___3 *qh_pool; ++ struct dma_pool___3 *qtd_pool; ++ struct dma_pool___3 *itd_pool; ++ struct dma_pool___3 *sitd_pool; ++ unsigned int random_frame; ++ long unsigned int next_statechange; ++ ktime_t last_periodic_enable; ++ u32 command; ++ unsigned int no_selective_suspend: 1; ++ unsigned int has_fsl_port_bug: 1; ++ unsigned int has_fsl_hs_errata: 1; ++ unsigned int has_fsl_susp_errata: 1; ++ unsigned int big_endian_mmio: 1; ++ unsigned int big_endian_desc: 1; ++ unsigned int big_endian_capbase: 1; ++ unsigned int has_amcc_usb23: 1; ++ unsigned int need_io_watchdog: 1; ++ unsigned int amd_pll_fix: 1; ++ unsigned int use_dummy_qh: 1; ++ unsigned int has_synopsys_hc_bug: 1; ++ unsigned int frame_index_bug: 1; ++ unsigned int need_oc_pp_cycle: 1; ++ unsigned int imx28_write_fix: 1; ++ __le32 *ohci_hcctrl_reg; ++ unsigned int has_hostpc: 1; ++ unsigned int has_tdi_phy_lpm: 1; ++ unsigned int has_ppcd: 1; ++ u8 sbrn; ++ struct ehci_stats stats; ++ struct dentry *debug_dir; ++ u8 bandwidth[64]; ++ u8 tt_budget[64]; ++ struct list_head tt_list; ++ long unsigned int priv[0]; ++}; ++ ++struct ehci_caps { ++ u32 hc_capbase; ++ u32 hcs_params; ++ u32 hcc_params; ++ u8 portroute[8]; ++}; ++ ++struct ehci_regs { ++ u32 command; ++ u32 status; ++ u32 intr_enable; ++ u32 frame_index; ++ u32 segment; ++ u32 frame_list; ++ u32 async_next; ++ u32 reserved1[2]; ++ u32 txfill_tuning; ++ u32 reserved2[6]; ++ u32 configured_flag; ++ u32 port_status[0]; ++ u32 reserved3[9]; ++ u32 usbmode; ++ u32 reserved4[6]; ++ u32 hostpc[0]; ++ u32 reserved5[17]; ++ u32 usbmode_ex; ++}; ++ ++struct ehci_dbg_port { ++ u32 control; ++ u32 pids; ++ u32 data03; ++ u32 data47; ++ u32 address; ++}; ++ ++struct ehci_fstn; ++ ++union ehci_shadow { ++ struct ehci_qh *qh; ++ struct ehci_itd *itd; ++ struct ehci_sitd *sitd; ++ struct ehci_fstn *fstn; ++ __le32 *hw_next; ++ void *ptr; ++}; ++ ++struct ehci_qh_hw; ++ ++struct ehci_qtd; ++ ++struct ehci_qh { ++ struct ehci_qh_hw *hw; ++ dma_addr_t qh_dma; ++ union ehci_shadow 
qh_next; ++ struct list_head qtd_list; ++ struct list_head intr_node; ++ struct ehci_qtd *dummy; ++ struct list_head unlink_node; ++ struct ehci_per_sched ps; ++ unsigned int unlink_cycle; ++ u8 qh_state; ++ u8 xacterrs; ++ u8 unlink_reason; ++ u8 gap_uf; ++ unsigned int is_out: 1; ++ unsigned int clearing_tt: 1; ++ unsigned int dequeue_during_giveback: 1; ++ unsigned int should_be_inactive: 1; ++}; ++ ++struct ehci_iso_stream; ++ ++struct ehci_itd { ++ __le32 hw_next; ++ __le32 hw_transaction[8]; ++ __le32 hw_bufp[7]; ++ __le32 hw_bufp_hi[7]; ++ dma_addr_t itd_dma; ++ union ehci_shadow itd_next; ++ struct urb *urb; ++ struct ehci_iso_stream *stream; ++ struct list_head itd_list; ++ unsigned int frame; ++ unsigned int pg; ++ unsigned int index[8]; ++ long: 64; ++}; ++ ++struct ehci_sitd { ++ __le32 hw_next; ++ __le32 hw_fullspeed_ep; ++ __le32 hw_uframe; ++ __le32 hw_results; ++ __le32 hw_buf[2]; ++ __le32 hw_backpointer; ++ __le32 hw_buf_hi[2]; ++ dma_addr_t sitd_dma; ++ union ehci_shadow sitd_next; ++ struct urb *urb; ++ struct ehci_iso_stream *stream; ++ struct list_head sitd_list; ++ unsigned int frame; ++ unsigned int index; ++}; ++ ++struct ehci_qtd { ++ __le32 hw_next; ++ __le32 hw_alt_next; ++ __le32 hw_token; ++ __le32 hw_buf[5]; ++ __le32 hw_buf_hi[5]; ++ dma_addr_t qtd_dma; ++ struct list_head qtd_list; ++ struct urb *urb; ++ size_t length; ++}; ++ ++struct ehci_fstn { ++ __le32 hw_next; ++ __le32 hw_prev; ++ dma_addr_t fstn_dma; ++ union ehci_shadow fstn_next; ++ long: 64; ++}; ++ ++struct ehci_qh_hw { ++ __le32 hw_next; ++ __le32 hw_info1; ++ __le32 hw_info2; ++ __le32 hw_current; ++ __le32 hw_qtd_next; ++ __le32 hw_alt_next; ++ __le32 hw_token; ++ __le32 hw_buf[5]; ++ __le32 hw_buf_hi[5]; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ehci_iso_packet { ++ u64 bufp; ++ __le32 transaction; ++ u8 cross; ++ u32 buf1; ++}; ++ ++struct ehci_iso_sched { ++ struct list_head td_list; ++ unsigned int span; ++ unsigned int first_packet; ++ struct ehci_iso_packet packet[0]; ++}; ++ ++struct ehci_iso_stream { ++ struct ehci_qh_hw *hw; ++ u8 bEndpointAddress; ++ u8 highspeed; ++ struct list_head td_list; ++ struct list_head free_list; ++ struct ehci_per_sched ps; ++ unsigned int next_uframe; ++ __le32 splits; ++ u16 uperiod; ++ u16 maxp; ++ unsigned int bandwidth; ++ __le32 buf0; ++ __le32 buf1; ++ __le32 buf2; ++ __le32 address; ++}; ++ ++struct ehci_tt { ++ u16 bandwidth[8]; ++ struct list_head tt_list; ++ struct list_head ps_list; ++ struct usb_tt *usb_tt; ++ int tt_port; ++}; ++ ++struct ehci_driver_overrides { ++ size_t extra_priv_size; ++ int (*reset)(struct usb_hcd *); ++ int (*port_power)(struct usb_hcd *, int, bool); ++}; ++ ++struct debug_buffer { ++ ssize_t (*fill_func)(struct debug_buffer *); ++ struct usb_bus *bus; ++ struct mutex mutex; ++ size_t count; ++ char *output_buf; ++ size_t alloc_size; ++}; ++ ++struct usb_ehci_pdata { ++ int caps_offset; ++ unsigned int has_tt: 1; ++ unsigned int has_synopsys_hc_bug: 1; ++ unsigned int big_endian_desc: 1; ++ unsigned int big_endian_mmio: 1; ++ unsigned int no_io_watchdog: 1; ++ unsigned int reset_on_resume: 1; ++ unsigned int dma_mask_64: 1; ++ int (*power_on)(struct platform_device *); ++ void (*power_off)(struct platform_device *); ++ void (*power_suspend)(struct platform_device *); ++ int (*pre_setup)(struct usb_hcd *); ++}; ++ ++struct ehci_platform_priv { ++ struct clk *clks[4]; ++ struct reset_control *rsts; ++ bool reset_on_resume; ++}; ++ ++typedef __u32 __hc32; ++ ++typedef __u16 __hc16; ++ ++struct 
td; ++ ++struct ed { ++ __hc32 hwINFO; ++ __hc32 hwTailP; ++ __hc32 hwHeadP; ++ __hc32 hwNextED; ++ dma_addr_t dma; ++ struct td *dummy; ++ struct ed *ed_next; ++ struct ed *ed_prev; ++ struct list_head td_list; ++ struct list_head in_use_list; ++ u8 state; ++ u8 type; ++ u8 branch; ++ u16 interval; ++ u16 load; ++ u16 last_iso; ++ u16 tick; ++ unsigned int takeback_wdh_cnt; ++ struct td *pending_td; ++ long: 64; ++}; ++ ++struct td { ++ __hc32 hwINFO; ++ __hc32 hwCBP; ++ __hc32 hwNextTD; ++ __hc32 hwBE; ++ __hc16 hwPSW[2]; ++ __u8 index; ++ struct ed *ed; ++ struct td *td_hash; ++ struct td *next_dl_td; ++ struct urb *urb; ++ dma_addr_t td_dma; ++ dma_addr_t data_dma; ++ struct list_head td_list; ++ long: 64; ++}; ++ ++struct ohci_hcca { ++ __hc32 int_table[32]; ++ __hc32 frame_no; ++ __hc32 done_head; ++ u8 reserved_for_hc[116]; ++ u8 what[4]; ++}; ++ ++struct ohci_roothub_regs { ++ __hc32 a; ++ __hc32 b; ++ __hc32 status; ++ __hc32 portstatus[15]; ++}; ++ ++struct ohci_regs { ++ __hc32 revision; ++ __hc32 control; ++ __hc32 cmdstatus; ++ __hc32 intrstatus; ++ __hc32 intrenable; ++ __hc32 intrdisable; ++ __hc32 hcca; ++ __hc32 ed_periodcurrent; ++ __hc32 ed_controlhead; ++ __hc32 ed_controlcurrent; ++ __hc32 ed_bulkhead; ++ __hc32 ed_bulkcurrent; ++ __hc32 donehead; ++ __hc32 fminterval; ++ __hc32 fmremaining; ++ __hc32 fmnumber; ++ __hc32 periodicstart; ++ __hc32 lsthresh; ++ struct ohci_roothub_regs roothub; ++ long: 64; ++ long: 64; ++}; ++ ++struct urb_priv { ++ struct ed *ed; ++ u16 length; ++ u16 td_cnt; ++ struct list_head pending; ++ struct td *td[0]; ++}; ++ ++typedef struct urb_priv urb_priv_t; ++ ++enum ohci_rh_state { ++ OHCI_RH_HALTED = 0, ++ OHCI_RH_SUSPENDED = 1, ++ OHCI_RH_RUNNING = 2, ++}; ++ ++struct ohci_hcd { ++ spinlock_t lock; ++ struct ohci_regs *regs; ++ struct ohci_hcca *hcca; ++ dma_addr_t hcca_dma; ++ struct ed *ed_rm_list; ++ struct ed *ed_bulktail; ++ struct ed *ed_controltail; ++ struct ed *periodic[32]; ++ void (*start_hnp)(struct ohci_hcd *); ++ struct dma_pool___3 *td_cache; ++ struct dma_pool___3 *ed_cache; ++ struct td *td_hash[64]; ++ struct td *dl_start; ++ struct td *dl_end; ++ struct list_head pending; ++ struct list_head eds_in_use; ++ enum ohci_rh_state rh_state; ++ int num_ports; ++ int load[32]; ++ u32 hc_control; ++ long unsigned int next_statechange; ++ u32 fminterval; ++ unsigned int autostop: 1; ++ unsigned int working: 1; ++ unsigned int restart_work: 1; ++ long unsigned int flags; ++ unsigned int prev_frame_no; ++ unsigned int wdh_cnt; ++ unsigned int prev_wdh_cnt; ++ u32 prev_donehead; ++ struct timer_list io_watchdog; ++ struct work_struct nec_work; ++ struct dentry *debug_dir; ++ long unsigned int priv[0]; ++}; ++ ++struct ohci_driver_overrides { ++ const char *product_desc; ++ size_t extra_priv_size; ++ int (*reset)(struct usb_hcd *); ++}; ++ ++struct debug_buffer___2 { ++ ssize_t (*fill_func)(struct debug_buffer___2 *); ++ struct ohci_hcd *ohci; ++ struct mutex mutex; ++ size_t count; ++ char *page; ++}; ++ ++struct uhci_td; ++ ++struct uhci_qh { ++ __le32 link; ++ __le32 element; ++ dma_addr_t dma_handle; ++ struct list_head node; ++ struct usb_host_endpoint *hep; ++ struct usb_device *udev; ++ struct list_head queue; ++ struct uhci_td *dummy_td; ++ struct uhci_td *post_td; ++ struct usb_iso_packet_descriptor *iso_packet_desc; ++ long unsigned int advance_jiffies; ++ unsigned int unlink_frame; ++ unsigned int period; ++ short int phase; ++ short int load; ++ unsigned int iso_frame; ++ int state; ++ int type; ++ int skel; ++ unsigned 
int initial_toggle: 1; ++ unsigned int needs_fixup: 1; ++ unsigned int is_stopped: 1; ++ unsigned int wait_expired: 1; ++ unsigned int bandwidth_reserved: 1; ++}; ++ ++struct uhci_td { ++ __le32 link; ++ __le32 status; ++ __le32 token; ++ __le32 buffer; ++ dma_addr_t dma_handle; ++ struct list_head list; ++ int frame; ++ struct list_head fl_list; ++}; ++ ++enum uhci_rh_state { ++ UHCI_RH_RESET = 0, ++ UHCI_RH_SUSPENDED = 1, ++ UHCI_RH_AUTO_STOPPED = 2, ++ UHCI_RH_RESUMING = 3, ++ UHCI_RH_SUSPENDING = 4, ++ UHCI_RH_RUNNING = 5, ++ UHCI_RH_RUNNING_NODEVS = 6, ++}; ++ ++struct uhci_hcd { ++ struct dentry *dentry; ++ long unsigned int io_addr; ++ void *regs; ++ struct dma_pool___3 *qh_pool; ++ struct dma_pool___3 *td_pool; ++ struct uhci_td *term_td; ++ struct uhci_qh *skelqh[11]; ++ struct uhci_qh *next_qh; ++ spinlock_t lock; ++ dma_addr_t frame_dma_handle; ++ __le32 *frame; ++ void **frame_cpu; ++ enum uhci_rh_state rh_state; ++ long unsigned int auto_stop_time; ++ unsigned int frame_number; ++ unsigned int is_stopped; ++ unsigned int last_iso_frame; ++ unsigned int cur_iso_frame; ++ unsigned int scan_in_progress: 1; ++ unsigned int need_rescan: 1; ++ unsigned int dead: 1; ++ unsigned int RD_enable: 1; ++ unsigned int is_initialized: 1; ++ unsigned int fsbr_is_on: 1; ++ unsigned int fsbr_is_wanted: 1; ++ unsigned int fsbr_expiring: 1; ++ struct timer_list fsbr_timer; ++ unsigned int oc_low: 1; ++ unsigned int wait_for_hp: 1; ++ unsigned int big_endian_mmio: 1; ++ unsigned int big_endian_desc: 1; ++ unsigned int is_aspeed: 1; ++ long unsigned int port_c_suspend; ++ long unsigned int resuming_ports; ++ long unsigned int ports_timeout; ++ struct list_head idle_qh_list; ++ int rh_numports; ++ wait_queue_head_t waitqh; ++ int num_waiting; ++ int total_load; ++ short int load[32]; ++ struct clk *clk; ++ void (*reset_hc)(struct uhci_hcd *); ++ int (*check_and_reset_hc)(struct uhci_hcd *); ++ void (*configure_hc)(struct uhci_hcd *); ++ int (*resume_detect_interrupts_are_broken)(struct uhci_hcd *); ++ int (*global_suspend_mode_is_broken)(struct uhci_hcd *); ++}; ++ ++struct urb_priv___2 { ++ struct list_head node; ++ struct urb *urb; ++ struct uhci_qh *qh; ++ struct list_head td_list; ++ unsigned int fsbr: 1; ++}; ++ ++struct uhci_debug { ++ int size; ++ char *data; ++}; ++ ++struct xhci_cap_regs { ++ __le32 hc_capbase; ++ __le32 hcs_params1; ++ __le32 hcs_params2; ++ __le32 hcs_params3; ++ __le32 hcc_params; ++ __le32 db_off; ++ __le32 run_regs_off; ++ __le32 hcc_params2; ++}; ++ ++struct xhci_op_regs { ++ __le32 command; ++ __le32 status; ++ __le32 page_size; ++ __le32 reserved1; ++ __le32 reserved2; ++ __le32 dev_notification; ++ __le64 cmd_ring; ++ __le32 reserved3[4]; ++ __le64 dcbaa_ptr; ++ __le32 config_reg; ++ __le32 reserved4[241]; ++ __le32 port_status_base; ++ __le32 port_power_base; ++ __le32 port_link_base; ++ __le32 reserved5; ++ __le32 reserved6[1016]; ++}; ++ ++struct xhci_intr_reg { ++ __le32 irq_pending; ++ __le32 irq_control; ++ __le32 erst_size; ++ __le32 rsvd; ++ __le64 erst_base; ++ __le64 erst_dequeue; ++}; ++ ++struct xhci_run_regs { ++ __le32 microframe_index; ++ __le32 rsvd[7]; ++ struct xhci_intr_reg ir_set[128]; ++}; ++ ++struct xhci_doorbell_array { ++ __le32 doorbell[256]; ++}; ++ ++struct xhci_container_ctx { ++ unsigned int type; ++ int size; ++ u8 *bytes; ++ dma_addr_t dma; ++}; ++ ++struct xhci_slot_ctx { ++ __le32 dev_info; ++ __le32 dev_info2; ++ __le32 tt_info; ++ __le32 dev_state; ++ __le32 reserved[4]; ++}; ++ ++struct xhci_ep_ctx { ++ __le32 ep_info; ++ __le32 
ep_info2; ++ __le64 deq; ++ __le32 tx_info; ++ __le32 reserved[3]; ++}; ++ ++struct xhci_input_control_ctx { ++ __le32 drop_flags; ++ __le32 add_flags; ++ __le32 rsvd2[6]; ++}; ++ ++union xhci_trb; ++ ++struct xhci_command { ++ struct xhci_container_ctx *in_ctx; ++ u32 status; ++ int slot_id; ++ struct completion *completion; ++ union xhci_trb *command_trb; ++ struct list_head cmd_list; ++}; ++ ++struct xhci_link_trb { ++ __le64 segment_ptr; ++ __le32 intr_target; ++ __le32 control; ++}; ++ ++struct xhci_transfer_event { ++ __le64 buffer; ++ __le32 transfer_len; ++ __le32 flags; ++}; ++ ++struct xhci_event_cmd { ++ __le64 cmd_trb; ++ __le32 status; ++ __le32 flags; ++}; ++ ++struct xhci_generic_trb { ++ __le32 field[4]; ++}; ++ ++union xhci_trb { ++ struct xhci_link_trb link; ++ struct xhci_transfer_event trans_event; ++ struct xhci_event_cmd event_cmd; ++ struct xhci_generic_trb generic; ++}; ++ ++struct xhci_stream_ctx { ++ __le64 stream_ring; ++ __le32 reserved[2]; ++}; ++ ++struct xhci_ring; ++ ++struct xhci_stream_info { ++ struct xhci_ring **stream_rings; ++ unsigned int num_streams; ++ struct xhci_stream_ctx *stream_ctx_array; ++ unsigned int num_stream_ctxs; ++ dma_addr_t ctx_array_dma; ++ struct radix_tree_root trb_address_map; ++ struct xhci_command *free_streams_command; ++}; ++ ++enum xhci_ring_type { ++ TYPE_CTRL = 0, ++ TYPE_ISOC = 1, ++ TYPE_BULK = 2, ++ TYPE_INTR = 3, ++ TYPE_STREAM = 4, ++ TYPE_COMMAND = 5, ++ TYPE_EVENT = 6, ++}; ++ ++struct xhci_segment; ++ ++struct xhci_ring { ++ struct xhci_segment *first_seg; ++ struct xhci_segment *last_seg; ++ union xhci_trb *enqueue; ++ struct xhci_segment *enq_seg; ++ union xhci_trb *dequeue; ++ struct xhci_segment *deq_seg; ++ struct list_head td_list; ++ u32 cycle_state; ++ unsigned int stream_id; ++ unsigned int num_segs; ++ unsigned int num_trbs_free; ++ unsigned int num_trbs_free_temp; ++ unsigned int bounce_buf_len; ++ enum xhci_ring_type type; ++ bool last_td_was_short; ++ struct radix_tree_root *trb_address_map; ++}; ++ ++struct xhci_bw_info { ++ unsigned int ep_interval; ++ unsigned int mult; ++ unsigned int num_packets; ++ unsigned int max_packet_size; ++ unsigned int max_esit_payload; ++ unsigned int type; ++}; ++ ++struct xhci_hcd; ++ ++struct xhci_virt_ep { ++ struct xhci_ring *ring; ++ struct xhci_stream_info *stream_info; ++ struct xhci_ring *new_ring; ++ unsigned int ep_state; ++ struct list_head cancelled_td_list; ++ struct timer_list stop_cmd_timer; ++ struct xhci_hcd *xhci; ++ struct xhci_segment *queued_deq_seg; ++ union xhci_trb *queued_deq_ptr; ++ bool skip; ++ struct xhci_bw_info bw_info; ++ struct list_head bw_endpoint_list; ++ int next_frame_id; ++ bool use_extended_tbc; ++}; ++ ++struct xhci_erst_entry; ++ ++struct xhci_erst { ++ struct xhci_erst_entry *entries; ++ unsigned int num_entries; ++ dma_addr_t erst_dma_addr; ++ unsigned int erst_size; ++}; ++ ++struct s3_save { ++ u32 command; ++ u32 dev_nt; ++ u64 dcbaa_ptr; ++ u32 config_reg; ++ u32 irq_pending; ++ u32 irq_control; ++ u32 erst_size; ++ u64 erst_base; ++ u64 erst_dequeue; ++}; ++ ++struct xhci_bus_state { ++ long unsigned int bus_suspended; ++ long unsigned int next_statechange; ++ u32 port_c_suspend; ++ u32 suspended_ports; ++ u32 port_remote_wakeup; ++ long unsigned int resume_done[31]; ++ long unsigned int resuming_ports; ++ long unsigned int rexit_ports; ++ struct completion rexit_done[31]; ++}; ++ ++struct xhci_port; ++ ++struct xhci_hub { ++ struct xhci_port **ports; ++ unsigned int num_ports; ++ struct usb_hcd *hcd; ++ u8 maj_rev; ++ u8 
min_rev; ++ u32 *psi; ++ u8 psi_count; ++ u8 psi_uid_count; ++}; ++ ++struct xhci_device_context_array; ++ ++struct xhci_scratchpad; ++ ++struct xhci_virt_device; ++ ++struct xhci_root_port_bw_info; ++ ++struct xhci_hcd { ++ struct usb_hcd *main_hcd; ++ struct usb_hcd *shared_hcd; ++ struct xhci_cap_regs *cap_regs; ++ struct xhci_op_regs *op_regs; ++ struct xhci_run_regs *run_regs; ++ struct xhci_doorbell_array *dba; ++ struct xhci_intr_reg *ir_set; ++ __u32 hcs_params1; ++ __u32 hcs_params2; ++ __u32 hcs_params3; ++ __u32 hcc_params; ++ __u32 hcc_params2; ++ spinlock_t lock; ++ u8 sbrn; ++ u16 hci_version; ++ u8 max_slots; ++ u8 max_interrupters; ++ u8 max_ports; ++ u8 isoc_threshold; ++ u32 imod_interval; ++ int event_ring_max; ++ int page_size; ++ int page_shift; ++ int msix_count; ++ struct clk *clk; ++ struct clk *reg_clk; ++ struct xhci_device_context_array *dcbaa; ++ struct xhci_ring *cmd_ring; ++ unsigned int cmd_ring_state; ++ struct list_head cmd_list; ++ unsigned int cmd_ring_reserved_trbs; ++ struct delayed_work cmd_timer; ++ struct completion cmd_ring_stop_completion; ++ struct xhci_command *current_cmd; ++ struct xhci_ring *event_ring; ++ struct xhci_erst erst; ++ struct xhci_scratchpad *scratchpad; ++ struct list_head lpm_failed_devs; ++ struct mutex mutex; ++ struct xhci_command *lpm_command; ++ struct xhci_virt_device *devs[256]; ++ struct xhci_root_port_bw_info *rh_bw; ++ struct dma_pool___3 *device_pool; ++ struct dma_pool___3 *segment_pool; ++ struct dma_pool___3 *small_streams_pool; ++ struct dma_pool___3 *medium_streams_pool; ++ unsigned int xhc_state; ++ u32 command; ++ struct s3_save s3; ++ long long unsigned int quirks; ++ unsigned int num_active_eps; ++ unsigned int limit_active_eps; ++ struct xhci_bus_state bus_state[2]; ++ struct xhci_port *hw_ports; ++ struct xhci_hub usb2_rhub; ++ struct xhci_hub usb3_rhub; ++ unsigned int sw_lpm_support: 1; ++ unsigned int hw_lpm_support: 1; ++ unsigned int broken_suspend: 1; ++ u32 *ext_caps; ++ unsigned int num_ext_caps; ++ struct timer_list comp_mode_recovery_timer; ++ u32 port_status_u0; ++ u16 test_mode; ++ struct dentry *debugfs_root; ++ struct dentry *debugfs_slots; ++ struct list_head regset_list; ++ void *dbc; ++ long unsigned int priv[0]; ++}; ++ ++struct xhci_segment { ++ union xhci_trb *trbs; ++ struct xhci_segment *next; ++ dma_addr_t dma; ++ dma_addr_t bounce_dma; ++ void *bounce_buf; ++ unsigned int bounce_offs; ++ unsigned int bounce_len; ++}; ++ ++enum xhci_overhead_type { ++ LS_OVERHEAD_TYPE = 0, ++ FS_OVERHEAD_TYPE = 1, ++ HS_OVERHEAD_TYPE = 2, ++}; ++ ++struct xhci_interval_bw { ++ unsigned int num_packets; ++ struct list_head endpoints; ++ unsigned int overhead[3]; ++}; ++ ++struct xhci_interval_bw_table { ++ unsigned int interval0_esit_payload; ++ struct xhci_interval_bw interval_bw[16]; ++ unsigned int bw_used; ++ unsigned int ss_bw_in; ++ unsigned int ss_bw_out; ++}; ++ ++struct xhci_tt_bw_info; ++ ++struct xhci_virt_device { ++ struct usb_device *udev; ++ struct xhci_container_ctx *out_ctx; ++ struct xhci_container_ctx *in_ctx; ++ struct xhci_virt_ep eps[31]; ++ u8 fake_port; ++ u8 real_port; ++ struct xhci_interval_bw_table *bw_table; ++ struct xhci_tt_bw_info *tt_info; ++ long unsigned int flags; ++ u16 current_mel; ++ void *debugfs_private; ++}; ++ ++struct xhci_tt_bw_info { ++ struct list_head tt_list; ++ int slot_id; ++ int ttport; ++ struct xhci_interval_bw_table bw_table; ++ int active_eps; ++}; ++ ++struct xhci_root_port_bw_info { ++ struct list_head tts; ++ unsigned int num_active_tts; ++ 
struct xhci_interval_bw_table bw_table; ++}; ++ ++struct xhci_device_context_array { ++ __le64 dev_context_ptrs[256]; ++ dma_addr_t dma; ++}; ++ ++enum xhci_setup_dev { ++ SETUP_CONTEXT_ONLY = 0, ++ SETUP_CONTEXT_ADDRESS = 1, ++}; ++ ++struct xhci_td { ++ struct list_head td_list; ++ struct list_head cancelled_td_list; ++ struct urb *urb; ++ struct xhci_segment *start_seg; ++ union xhci_trb *first_trb; ++ union xhci_trb *last_trb; ++ struct xhci_segment *bounce_seg; ++ bool urb_length_set; ++}; ++ ++struct xhci_dequeue_state { ++ struct xhci_segment *new_deq_seg; ++ union xhci_trb *new_deq_ptr; ++ int new_cycle_state; ++ unsigned int stream_id; ++}; ++ ++struct xhci_erst_entry { ++ __le64 seg_addr; ++ __le32 seg_size; ++ __le32 rsvd; ++}; ++ ++struct xhci_scratchpad { ++ u64 *sp_array; ++ dma_addr_t sp_dma; ++ void **sp_buffers; ++}; ++ ++struct urb_priv___3 { ++ int num_tds; ++ int num_tds_done; ++ struct xhci_td td[0]; ++}; ++ ++struct xhci_port { ++ __le32 *addr; ++ int hw_portnum; ++ int hcd_portnum; ++ struct xhci_hub *rhub; ++}; ++ ++struct xhci_driver_overrides { ++ size_t extra_priv_size; ++ int (*reset)(struct usb_hcd *); ++ int (*start)(struct usb_hcd *); ++}; ++ ++typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); ++ ++enum xhci_ep_reset_type { ++ EP_HARD_RESET = 0, ++ EP_SOFT_RESET = 1, ++}; ++ ++struct kfifo { ++ union { ++ struct __kfifo kfifo; ++ unsigned char *type; ++ const unsigned char *const_type; ++ char (*rectype)[0]; ++ void *ptr; ++ const void *ptr_const; ++ }; ++ unsigned char buf[0]; ++}; ++ ++struct dbc_regs { ++ __le32 capability; ++ __le32 doorbell; ++ __le32 ersts; ++ __le32 __reserved_0; ++ __le64 erstba; ++ __le64 erdp; ++ __le32 control; ++ __le32 status; ++ __le32 portsc; ++ __le32 __reserved_1; ++ __le64 dccp; ++ __le32 devinfo1; ++ __le32 devinfo2; ++}; ++ ++struct dbc_str_descs { ++ char string0[64]; ++ char manufacturer[64]; ++ char product[64]; ++ char serial[64]; ++}; ++ ++enum dbc_state { ++ DS_DISABLED = 0, ++ DS_INITIALIZED = 1, ++ DS_ENABLED = 2, ++ DS_CONNECTED = 3, ++ DS_CONFIGURED = 4, ++ DS_STALLED = 5, ++}; ++ ++struct dbc_ep; ++ ++struct dbc_request { ++ void *buf; ++ unsigned int length; ++ dma_addr_t dma; ++ void (*complete)(struct xhci_hcd *, struct dbc_request *); ++ struct list_head list_pool; ++ int status; ++ unsigned int actual; ++ struct dbc_ep *dep; ++ struct list_head list_pending; ++ dma_addr_t trb_dma; ++ union xhci_trb *trb; ++ unsigned int direction: 1; ++}; ++ ++struct xhci_dbc; ++ ++struct dbc_ep { ++ struct xhci_dbc *dbc; ++ struct list_head list_pending; ++ struct xhci_ring *ring; ++ unsigned int direction: 1; ++}; ++ ++struct dbc_port { ++ struct tty_port port; ++ spinlock_t port_lock; ++ struct list_head read_pool; ++ struct list_head read_queue; ++ unsigned int n_read; ++ struct tasklet_struct push; ++ struct list_head write_pool; ++ struct kfifo write_fifo; ++ bool registered; ++ struct dbc_ep *in; ++ struct dbc_ep *out; ++}; ++ ++struct xhci_dbc { ++ spinlock_t lock; ++ struct xhci_hcd *xhci; ++ struct dbc_regs *regs; ++ struct xhci_ring *ring_evt; ++ struct xhci_ring *ring_in; ++ struct xhci_ring *ring_out; ++ struct xhci_erst erst; ++ struct xhci_container_ctx *ctx; ++ struct dbc_str_descs *string; ++ dma_addr_t string_dma; ++ size_t string_size; ++ enum dbc_state state; ++ struct delayed_work event_work; ++ unsigned int resume_required: 1; ++ struct dbc_ep eps[2]; ++ struct dbc_port port; ++}; ++ ++struct trace_event_raw_xhci_log_msg { ++ struct trace_entry ent; ++ u32 __data_loc_msg; ++ char 
__data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_ctx { ++ struct trace_entry ent; ++ int ctx_64; ++ unsigned int ctx_type; ++ dma_addr_t ctx_dma; ++ u8 *ctx_va; ++ unsigned int ctx_ep_num; ++ int slot_id; ++ u32 __data_loc_ctx_data; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_trb { ++ struct trace_entry ent; ++ u32 type; ++ u32 field0; ++ u32 field1; ++ u32 field2; ++ u32 field3; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_free_virt_dev { ++ struct trace_entry ent; ++ void *vdev; ++ long long unsigned int out_ctx; ++ long long unsigned int in_ctx; ++ u8 fake_port; ++ u8 real_port; ++ u16 current_mel; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_virt_dev { ++ struct trace_entry ent; ++ void *vdev; ++ long long unsigned int out_ctx; ++ long long unsigned int in_ctx; ++ int devnum; ++ int state; ++ int speed; ++ u8 portnum; ++ u8 level; ++ int slot_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_urb { ++ struct trace_entry ent; ++ void *urb; ++ unsigned int pipe; ++ unsigned int stream; ++ int status; ++ unsigned int flags; ++ int num_mapped_sgs; ++ int num_sgs; ++ int length; ++ int actual; ++ int epnum; ++ int dir_in; ++ int type; ++ int slot_id; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_ep_ctx { ++ struct trace_entry ent; ++ u32 info; ++ u32 info2; ++ u64 deq; ++ u32 tx_info; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_slot_ctx { ++ struct trace_entry ent; ++ u32 info; ++ u32 info2; ++ u32 tt_info; ++ u32 state; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_ring { ++ struct trace_entry ent; ++ u32 type; ++ void *ring; ++ dma_addr_t enq; ++ dma_addr_t deq; ++ dma_addr_t enq_seg; ++ dma_addr_t deq_seg; ++ unsigned int num_segs; ++ unsigned int stream_id; ++ unsigned int cycle_state; ++ unsigned int num_trbs_free; ++ unsigned int bounce_buf_len; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_log_portsc { ++ struct trace_entry ent; ++ u32 portnum; ++ u32 portsc; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_xhci_dbc_log_request { ++ struct trace_entry ent; ++ struct dbc_request *req; ++ bool dir; ++ unsigned int actual; ++ unsigned int length; ++ int status; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_xhci_log_msg { ++ u32 msg; ++}; ++ ++struct trace_event_data_offsets_xhci_log_ctx { ++ u32 ctx_data; ++}; ++ ++struct trace_event_data_offsets_xhci_log_trb {}; ++ ++struct trace_event_data_offsets_xhci_log_free_virt_dev {}; ++ ++struct trace_event_data_offsets_xhci_log_virt_dev {}; ++ ++struct trace_event_data_offsets_xhci_log_urb {}; ++ ++struct trace_event_data_offsets_xhci_log_ep_ctx {}; ++ ++struct trace_event_data_offsets_xhci_log_slot_ctx {}; ++ ++struct trace_event_data_offsets_xhci_log_ring {}; ++ ++struct trace_event_data_offsets_xhci_log_portsc {}; ++ ++struct trace_event_data_offsets_xhci_dbc_log_request {}; ++ ++struct xhci_regset { ++ char name[32]; ++ struct debugfs_regset32 regset; ++ size_t nregs; ++ struct dentry *parent; ++ struct list_head list; ++}; ++ ++struct xhci_file_map { ++ const char *name; ++ int (*show)(struct seq_file *, void *); ++}; ++ ++struct xhci_ep_priv { ++ char name[32]; ++ struct dentry *root; ++}; ++ ++struct xhci_slot_priv { ++ char name[32]; ++ struct dentry *root; ++ struct xhci_ep_priv *eps[31]; ++ struct xhci_virt_device *dev; ++}; ++ ++struct usb_otg_caps { ++ u16 otg_rev; ++ bool hnp_support; ++ bool srp_support; ++ bool adp_support; ++}; ++ ++enum usb_dr_mode { ++ USB_DR_MODE_UNKNOWN = 0, ++ 
USB_DR_MODE_HOST = 1, ++ USB_DR_MODE_PERIPHERAL = 2, ++ USB_DR_MODE_OTG = 3, ++}; ++ ++struct typec_device_id { ++ __u16 svid; ++ __u8 mode; ++ kernel_ulong_t driver_data; ++}; ++ ++enum typec_port_type { ++ TYPEC_PORT_SRC = 0, ++ TYPEC_PORT_SNK = 1, ++ TYPEC_PORT_DRP = 2, ++}; ++ ++enum typec_port_data { ++ TYPEC_PORT_DFP = 0, ++ TYPEC_PORT_UFP = 1, ++ TYPEC_PORT_DRD = 2, ++}; ++ ++enum typec_plug_type { ++ USB_PLUG_NONE = 0, ++ USB_PLUG_TYPE_A = 1, ++ USB_PLUG_TYPE_B = 2, ++ USB_PLUG_TYPE_C = 3, ++ USB_PLUG_CAPTIVE = 4, ++}; ++ ++enum typec_data_role { ++ TYPEC_DEVICE = 0, ++ TYPEC_HOST = 1, ++}; ++ ++enum typec_role { ++ TYPEC_SINK = 0, ++ TYPEC_SOURCE = 1, ++}; ++ ++enum typec_pwr_opmode { ++ TYPEC_PWR_MODE_USB = 0, ++ TYPEC_PWR_MODE_1_5A = 1, ++ TYPEC_PWR_MODE_3_0A = 2, ++ TYPEC_PWR_MODE_PD = 3, ++}; ++ ++enum typec_accessory { ++ TYPEC_ACCESSORY_NONE = 0, ++ TYPEC_ACCESSORY_AUDIO = 1, ++ TYPEC_ACCESSORY_DEBUG = 2, ++}; ++ ++enum typec_orientation { ++ TYPEC_ORIENTATION_NONE = 0, ++ TYPEC_ORIENTATION_NORMAL = 1, ++ TYPEC_ORIENTATION_REVERSE = 2, ++}; ++ ++struct usb_pd_identity { ++ u32 id_header; ++ u32 cert_stat; ++ u32 product; ++}; ++ ++struct typec_altmode_desc { ++ u16 svid; ++ u8 mode; ++ u32 vdo; ++ enum typec_port_data roles; ++}; ++ ++enum typec_plug_index { ++ TYPEC_PLUG_SOP_P = 0, ++ TYPEC_PLUG_SOP_PP = 1, ++}; ++ ++struct typec_plug_desc { ++ enum typec_plug_index index; ++}; ++ ++struct typec_cable_desc { ++ enum typec_plug_type type; ++ unsigned int active: 1; ++ struct usb_pd_identity *identity; ++}; ++ ++struct typec_partner_desc { ++ unsigned int usb_pd: 1; ++ enum typec_accessory accessory; ++ struct usb_pd_identity *identity; ++}; ++ ++struct typec_switch; ++ ++struct typec_mux; ++ ++struct typec_capability { ++ enum typec_port_type type; ++ enum typec_port_data data; ++ u16 revision; ++ u16 pd_revision; ++ int prefer_role; ++ enum typec_accessory accessory[3]; ++ struct typec_switch *sw; ++ struct typec_mux *mux; ++ struct fwnode_handle *fwnode; ++ int (*try_role)(const struct typec_capability *, int); ++ int (*dr_set)(const struct typec_capability *, enum typec_data_role); ++ int (*pr_set)(const struct typec_capability *, enum typec_role); ++ int (*vconn_set)(const struct typec_capability *, enum typec_role); ++ int (*port_type_set)(const struct typec_capability *, enum typec_port_type); ++}; ++ ++struct typec_switch { ++ struct device *dev; ++ struct list_head entry; ++ int (*set)(struct typec_switch *, enum typec_orientation); ++}; ++ ++struct typec_mux { ++ struct device *dev; ++ struct list_head entry; ++ int (*set)(struct typec_mux *, int); ++}; ++ ++struct typec_altmode_ops; ++ ++struct typec_altmode { ++ struct device dev; ++ u16 svid; ++ int mode; ++ u32 vdo; ++ unsigned int active: 1; ++ char *desc; ++ const struct typec_altmode_ops *ops; ++}; ++ ++struct typec_altmode_ops { ++ int (*enter)(struct typec_altmode *); ++ int (*exit)(struct typec_altmode *); ++ void (*attention)(struct typec_altmode *, u32); ++ int (*vdm)(struct typec_altmode *, const u32, const u32 *, int); ++ int (*notify)(struct typec_altmode *, long unsigned int, void *); ++ int (*activate)(struct typec_altmode *, int); ++}; ++ ++enum { ++ TYPEC_STATE_SAFE = 0, ++ TYPEC_STATE_USB = 1, ++ TYPEC_STATE_MODAL = 2, ++}; ++ ++struct altmode { ++ unsigned int id; ++ struct typec_altmode adev; ++ struct typec_mux *mux; ++ enum typec_port_data roles; ++ struct attribute *attrs[5]; ++ char group_name[8]; ++ struct attribute_group group; ++ const struct attribute_group *groups[2]; ++ struct altmode 
*partner; ++ struct altmode *plug[2]; ++ struct blocking_notifier_head nh; ++}; ++ ++struct typec_plug { ++ struct device dev; ++ enum typec_plug_index index; ++ struct ida mode_ids; ++}; ++ ++struct typec_cable { ++ struct device dev; ++ enum typec_plug_type type; ++ struct usb_pd_identity *identity; ++ unsigned int active: 1; ++}; ++ ++struct typec_partner { ++ struct device dev; ++ unsigned int usb_pd: 1; ++ struct usb_pd_identity *identity; ++ enum typec_accessory accessory; ++ struct ida mode_ids; ++}; ++ ++struct typec_port { ++ unsigned int id; ++ struct device dev; ++ struct ida mode_ids; ++ int prefer_role; ++ enum typec_data_role data_role; ++ enum typec_role pwr_role; ++ enum typec_role vconn_role; ++ enum typec_pwr_opmode pwr_opmode; ++ enum typec_port_type port_type; ++ struct mutex port_type_lock; ++ enum typec_orientation orientation; ++ struct typec_switch *sw; ++ struct typec_mux *mux; ++ const struct typec_capability *cap; ++}; ++ ++struct typec_altmode_driver { ++ const struct typec_device_id *id_table; ++ int (*probe)(struct typec_altmode *); ++ void (*remove)(struct typec_altmode *); ++ struct device_driver driver; ++}; ++ ++struct ucsi_cci { ++ char: 1; ++ u8 connector_change: 7; ++ u8 data_length; ++ short: 9; ++ u16 not_supported: 1; ++ u16 cancel_complete: 1; ++ u16 reset_complete: 1; ++ u16 busy: 1; ++ u16 ack_complete: 1; ++ u16 error: 1; ++ u16 cmd_complete: 1; ++}; ++ ++struct ucsi_command { ++ u8 cmd; ++ u8 length; ++ u64 data: 48; ++}; ++ ++struct ucsi_ack_cmd { ++ u8 cmd; ++ u8 length; ++ u8 cci_ack: 1; ++ u8 cmd_ack: 1; ++}; ++ ++struct ucsi_con_rst { ++ u8 cmd; ++ u8 length; ++ u8 con_num: 7; ++ u8 hard_reset: 1; ++}; ++ ++struct ucsi_uor_cmd { ++ u8 cmd; ++ u8 length; ++ u16 con_num: 7; ++ u16 role: 3; ++}; ++ ++struct ucsi_control { ++ union { ++ u64 raw_cmd; ++ struct ucsi_command cmd; ++ struct ucsi_uor_cmd uor; ++ struct ucsi_ack_cmd ack; ++ struct ucsi_con_rst con_rst; ++ }; ++}; ++ ++struct ucsi_capability { ++ u32 attributes; ++ u32 num_connectors: 8; ++ u32 features: 24; ++ u8 num_alt_modes; ++ u8 reserved; ++ u16 bc_version; ++ u16 pd_version; ++ u16 typec_version; ++}; ++ ++struct ucsi_connector_capability { ++ u8 op_mode; ++ u8 provider: 1; ++ u8 consumer: 1; ++}; ++ ++struct ucsi_connector_status { ++ u16 change; ++ u16 pwr_op_mode: 3; ++ u16 connected: 1; ++ u16 pwr_dir: 1; ++ u16 partner_flags: 8; ++ u16 partner_type: 3; ++ u32 request_data_obj; ++ u8 bc_status: 2; ++ u8 provider_cap_limit_reason: 4; ++} __attribute__((packed)); ++ ++struct ucsi_data { ++ u16 version; ++ u16 reserved; ++ union { ++ u32 raw_cci; ++ struct ucsi_cci cci; ++ }; ++ struct ucsi_control ctrl; ++ u32 message_in[4]; ++ u32 message_out[4]; ++}; ++ ++struct ucsi_ppm { ++ struct ucsi_data *data; ++ int (*cmd)(struct ucsi_ppm *, struct ucsi_control *); ++ int (*sync)(struct ucsi_ppm *); ++}; ++ ++enum ucsi_status { ++ UCSI_IDLE = 0, ++ UCSI_BUSY = 1, ++ UCSI_ERROR = 2, ++}; ++ ++struct typec_port___2; ++ ++struct typec_partner___2; ++ ++struct ucsi; ++ ++struct ucsi_connector { ++ int num; ++ struct ucsi *ucsi; ++ struct work_struct work; ++ struct completion complete; ++ struct typec_port___2 *port; ++ struct typec_partner___2 *partner; ++ struct typec_capability typec_cap; ++ struct ucsi_connector_status status; ++ struct ucsi_connector_capability cap; ++}; ++ ++struct ucsi { ++ struct device *dev; ++ struct ucsi_ppm *ppm; ++ enum ucsi_status status; ++ struct completion complete; ++ struct ucsi_capability cap; ++ struct ucsi_connector *connector; ++ struct work_struct 
work; ++ struct mutex ppm_lock; ++ long unsigned int flags; ++}; ++ ++struct trace_event_raw_ucsi_log_ack { ++ struct trace_entry ent; ++ u8 ack; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_ucsi_log_control { ++ struct trace_entry ent; ++ u64 ctrl; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_ucsi_log_command { ++ struct trace_entry ent; ++ u64 ctrl; ++ int ret; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_ucsi_log_cci { ++ struct trace_entry ent; ++ u32 cci; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_ucsi_log_connector_status { ++ struct trace_entry ent; ++ int port; ++ u16 change; ++ u8 opmode; ++ u8 connected; ++ u8 pwr_dir; ++ u8 partner_flags; ++ u8 partner_type; ++ u32 request_data_obj; ++ u8 bc_status; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_ucsi_log_ack {}; ++ ++struct trace_event_data_offsets_ucsi_log_control {}; ++ ++struct trace_event_data_offsets_ucsi_log_command {}; ++ ++struct trace_event_data_offsets_ucsi_log_cci {}; ++ ++struct trace_event_data_offsets_ucsi_log_connector_status {}; ++ ++struct ucsi___2; ++ ++struct ucsi_acpi { ++ struct device *dev; ++ struct ucsi___2 *ucsi; ++ struct ucsi_ppm ppm; ++ guid_t guid; ++}; ++ ++struct serio_device_id { ++ __u8 type; ++ __u8 extra; ++ __u8 id; ++ __u8 proto; ++}; ++ ++struct serio_driver; ++ ++struct serio { ++ void *port_data; ++ char name[32]; ++ char phys[32]; ++ char firmware_id[128]; ++ bool manual_bind; ++ struct serio_device_id id; ++ spinlock_t lock; ++ int (*write)(struct serio *, unsigned char); ++ int (*open)(struct serio *); ++ void (*close)(struct serio *); ++ int (*start)(struct serio *); ++ void (*stop)(struct serio *); ++ struct serio *parent; ++ struct list_head child_node; ++ struct list_head children; ++ unsigned int depth; ++ struct serio_driver *drv; ++ struct mutex drv_mutex; ++ struct device dev; ++ struct list_head node; ++ struct mutex *ps2_cmd_mutex; ++}; ++ ++struct serio_driver { ++ const char *description; ++ const struct serio_device_id *id_table; ++ bool manual_bind; ++ void (*write_wakeup)(struct serio *); ++ irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); ++ int (*connect)(struct serio *, struct serio_driver *); ++ int (*reconnect)(struct serio *); ++ int (*fast_reconnect)(struct serio *); ++ void (*disconnect)(struct serio *); ++ void (*cleanup)(struct serio *); ++ struct device_driver driver; ++}; ++ ++enum serio_event_type { ++ SERIO_RESCAN_PORT = 0, ++ SERIO_RECONNECT_PORT = 1, ++ SERIO_RECONNECT_SUBTREE = 2, ++ SERIO_REGISTER_PORT = 3, ++ SERIO_ATTACH_DRIVER = 4, ++}; ++ ++struct serio_event { ++ enum serio_event_type type; ++ void *object; ++ struct module *owner; ++ struct list_head node; ++}; ++ ++struct serport { ++ struct tty_struct *tty; ++ wait_queue_head_t wait; ++ struct serio *serio; ++ struct serio_device_id id; ++ spinlock_t lock; ++ long unsigned int flags; ++}; ++ ++struct amba_kmi_port { ++ struct serio *io; ++ struct clk *clk; ++ void *base; ++ unsigned int irq; ++ unsigned int divisor; ++ unsigned int open; ++}; ++ ++struct ps2dev { ++ struct serio *serio; ++ struct mutex cmd_mutex; ++ wait_queue_head_t wait; ++ long unsigned int flags; ++ u8 cmdbuf[8]; ++ u8 cmdcnt; ++ u8 nak; ++}; ++ ++struct input_mt_slot { ++ int abs[14]; ++ unsigned int frame; ++ unsigned int key; ++}; ++ ++struct input_mt { ++ int trkid; ++ int num_slots; ++ int slot; ++ unsigned int flags; ++ unsigned int frame; ++ int *red; ++ struct input_mt_slot slots[0]; ++}; ++ ++union input_seq_state { ++ struct { ++ short unsigned int 
pos; ++ bool mutex_acquired; ++ }; ++ void *p; ++}; ++ ++struct input_devres { ++ struct input_dev *input; ++}; ++ ++struct input_event { ++ __kernel_ulong_t __sec; ++ __kernel_ulong_t __usec; ++ __u16 type; ++ __u16 code; ++ __s32 value; ++}; ++ ++struct input_event_compat { ++ compat_ulong_t sec; ++ compat_ulong_t usec; ++ __u16 type; ++ __u16 code; ++ __s32 value; ++}; ++ ++struct ff_periodic_effect_compat { ++ __u16 waveform; ++ __u16 period; ++ __s16 magnitude; ++ __s16 offset; ++ __u16 phase; ++ struct ff_envelope envelope; ++ __u32 custom_len; ++ compat_uptr_t custom_data; ++}; ++ ++struct ff_effect_compat { ++ __u16 type; ++ __s16 id; ++ __u16 direction; ++ struct ff_trigger trigger; ++ struct ff_replay replay; ++ union { ++ struct ff_constant_effect constant; ++ struct ff_ramp_effect ramp; ++ struct ff_periodic_effect_compat periodic; ++ struct ff_condition_effect condition[2]; ++ struct ff_rumble_effect rumble; ++ } u; ++}; ++ ++struct input_mt_pos { ++ s16 x; ++ s16 y; ++}; ++ ++struct input_led { ++ struct led_classdev cdev; ++ struct input_handle *handle; ++ unsigned int code; ++}; ++ ++struct input_leds { ++ struct input_handle handle; ++ unsigned int num_leds; ++ struct input_led leds[0]; ++}; ++ ++struct mousedev_hw_data { ++ int dx; ++ int dy; ++ int dz; ++ int x; ++ int y; ++ int abs_event; ++ long unsigned int buttons; ++}; ++ ++struct mousedev { ++ int open; ++ struct input_handle handle; ++ wait_queue_head_t wait; ++ struct list_head client_list; ++ spinlock_t client_lock; ++ struct mutex mutex; ++ struct device dev; ++ struct cdev cdev; ++ bool exist; ++ struct list_head mixdev_node; ++ bool opened_by_mixdev; ++ struct mousedev_hw_data packet; ++ unsigned int pkt_count; ++ int old_x[4]; ++ int old_y[4]; ++ int frac_dx; ++ int frac_dy; ++ long unsigned int touch; ++ int (*open_device)(struct mousedev *); ++ void (*close_device)(struct mousedev *); ++}; ++ ++enum mousedev_emul { ++ MOUSEDEV_EMUL_PS2 = 0, ++ MOUSEDEV_EMUL_IMPS = 1, ++ MOUSEDEV_EMUL_EXPS = 2, ++}; ++ ++struct mousedev_motion { ++ int dx; ++ int dy; ++ int dz; ++ long unsigned int buttons; ++}; ++ ++struct mousedev_client { ++ struct fasync_struct *fasync; ++ struct mousedev *mousedev; ++ struct list_head node; ++ struct mousedev_motion packets[16]; ++ unsigned int head; ++ unsigned int tail; ++ spinlock_t packet_lock; ++ int pos_x; ++ int pos_y; ++ u8 ps2[6]; ++ unsigned char ready; ++ unsigned char buffer; ++ unsigned char bufsiz; ++ unsigned char imexseq; ++ unsigned char impsseq; ++ enum mousedev_emul mode; ++ long unsigned int last_buttons; ++}; ++ ++enum { ++ FRACTION_DENOM = 128, ++}; ++ ++struct input_mask { ++ __u32 type; ++ __u32 codes_size; ++ __u64 codes_ptr; ++}; ++ ++enum evdev_clock_type { ++ EV_CLK_REAL = 0, ++ EV_CLK_MONO = 1, ++ EV_CLK_BOOT = 2, ++ EV_CLK_MAX = 3, ++}; ++ ++struct evdev_client; ++ ++struct evdev { ++ int open; ++ struct input_handle handle; ++ wait_queue_head_t wait; ++ struct evdev_client *grab; ++ struct list_head client_list; ++ spinlock_t client_lock; ++ struct mutex mutex; ++ struct device dev; ++ struct cdev cdev; ++ bool exist; ++}; ++ ++struct evdev_client { ++ unsigned int head; ++ unsigned int tail; ++ unsigned int packet_head; ++ spinlock_t buffer_lock; ++ struct fasync_struct *fasync; ++ struct evdev *evdev; ++ struct list_head node; ++ unsigned int clk_type; ++ bool revoked; ++ long unsigned int *evmasks[32]; ++ unsigned int bufsize; ++ struct input_event buffer[0]; ++}; ++ ++struct atkbd { ++ struct ps2dev ps2dev; ++ struct input_dev *dev; ++ char name[64]; 
++ char phys[32]; ++ short unsigned int id; ++ short unsigned int keycode[512]; ++ long unsigned int force_release_mask[8]; ++ unsigned char set; ++ bool translated; ++ bool extra; ++ bool write; ++ bool softrepeat; ++ bool softraw; ++ bool scroll; ++ bool enabled; ++ unsigned char emul; ++ bool resend; ++ bool release; ++ long unsigned int xl_bit; ++ unsigned int last; ++ long unsigned int time; ++ long unsigned int err_count; ++ struct delayed_work event_work; ++ long unsigned int event_jiffies; ++ long unsigned int event_mask; ++ struct mutex mutex; ++}; ++ ++enum psmouse_state { ++ PSMOUSE_IGNORE = 0, ++ PSMOUSE_INITIALIZING = 1, ++ PSMOUSE_RESYNCING = 2, ++ PSMOUSE_CMD_MODE = 3, ++ PSMOUSE_ACTIVATED = 4, ++}; ++ ++typedef enum { ++ PSMOUSE_BAD_DATA = 0, ++ PSMOUSE_GOOD_DATA = 1, ++ PSMOUSE_FULL_PACKET = 2, ++} psmouse_ret_t; ++ ++enum psmouse_scale { ++ PSMOUSE_SCALE11 = 0, ++ PSMOUSE_SCALE21 = 1, ++}; ++ ++enum psmouse_type { ++ PSMOUSE_NONE = 0, ++ PSMOUSE_PS2 = 1, ++ PSMOUSE_PS2PP = 2, ++ PSMOUSE_THINKPS = 3, ++ PSMOUSE_GENPS = 4, ++ PSMOUSE_IMPS = 5, ++ PSMOUSE_IMEX = 6, ++ PSMOUSE_SYNAPTICS = 7, ++ PSMOUSE_ALPS = 8, ++ PSMOUSE_LIFEBOOK = 9, ++ PSMOUSE_TRACKPOINT = 10, ++ PSMOUSE_TOUCHKIT_PS2 = 11, ++ PSMOUSE_CORTRON = 12, ++ PSMOUSE_HGPK = 13, ++ PSMOUSE_ELANTECH = 14, ++ PSMOUSE_FSP = 15, ++ PSMOUSE_SYNAPTICS_RELATIVE = 16, ++ PSMOUSE_CYPRESS = 17, ++ PSMOUSE_FOCALTECH = 18, ++ PSMOUSE_VMMOUSE = 19, ++ PSMOUSE_BYD = 20, ++ PSMOUSE_SYNAPTICS_SMBUS = 21, ++ PSMOUSE_ELANTECH_SMBUS = 22, ++ PSMOUSE_AUTO = 23, ++}; ++ ++struct psmouse; ++ ++struct psmouse_protocol { ++ enum psmouse_type type; ++ bool maxproto; ++ bool ignore_parity; ++ bool try_passthru; ++ bool smbus_companion; ++ const char *name; ++ const char *alias; ++ int (*detect)(struct psmouse *, bool); ++ int (*init)(struct psmouse *); ++}; ++ ++struct psmouse { ++ void *private; ++ struct input_dev *dev; ++ struct ps2dev ps2dev; ++ struct delayed_work resync_work; ++ const char *vendor; ++ const char *name; ++ const struct psmouse_protocol *protocol; ++ unsigned char packet[8]; ++ unsigned char badbyte; ++ unsigned char pktcnt; ++ unsigned char pktsize; ++ unsigned char oob_data_type; ++ unsigned char extra_buttons; ++ bool acks_disable_command; ++ unsigned int model; ++ long unsigned int last; ++ long unsigned int out_of_sync_cnt; ++ long unsigned int num_resyncs; ++ enum psmouse_state state; ++ char devname[64]; ++ char phys[32]; ++ unsigned int rate; ++ unsigned int resolution; ++ unsigned int resetafter; ++ unsigned int resync_time; ++ bool smartscroll; ++ psmouse_ret_t (*protocol_handler)(struct psmouse *); ++ void (*set_rate)(struct psmouse *, unsigned int); ++ void (*set_resolution)(struct psmouse *, unsigned int); ++ void (*set_scale)(struct psmouse *, enum psmouse_scale); ++ int (*reconnect)(struct psmouse *); ++ int (*fast_reconnect)(struct psmouse *); ++ void (*disconnect)(struct psmouse *); ++ void (*cleanup)(struct psmouse *); ++ int (*poll)(struct psmouse *); ++ void (*pt_activate)(struct psmouse *); ++ void (*pt_deactivate)(struct psmouse *); ++}; ++ ++struct psmouse_attribute { ++ struct device_attribute dattr; ++ void *data; ++ ssize_t (*show)(struct psmouse *, void *, char *); ++ ssize_t (*set)(struct psmouse *, void *, const char *, size_t); ++ bool protect; ++}; ++ ++struct rmi_2d_axis_alignment { ++ bool swap_axes; ++ bool flip_x; ++ bool flip_y; ++ u16 clip_x_low; ++ u16 clip_y_low; ++ u16 clip_x_high; ++ u16 clip_y_high; ++ u16 offset_x; ++ u16 offset_y; ++ u8 delta_x_threshold; ++ u8 
delta_y_threshold; ++}; ++ ++enum rmi_sensor_type { ++ rmi_sensor_default = 0, ++ rmi_sensor_touchscreen = 1, ++ rmi_sensor_touchpad = 2, ++}; ++ ++struct rmi_2d_sensor_platform_data { ++ struct rmi_2d_axis_alignment axis_align; ++ enum rmi_sensor_type sensor_type; ++ int x_mm; ++ int y_mm; ++ int disable_report_mask; ++ u16 rezero_wait; ++ bool topbuttonpad; ++ bool kernel_tracking; ++ int dmax; ++ int dribble; ++ int palm_detect; ++}; ++ ++struct rmi_f30_data { ++ bool buttonpad; ++ bool trackstick_buttons; ++ bool disable; ++}; ++ ++enum rmi_reg_state { ++ RMI_REG_STATE_DEFAULT = 0, ++ RMI_REG_STATE_OFF = 1, ++ RMI_REG_STATE_ON = 2, ++}; ++ ++struct rmi_f01_power_management { ++ enum rmi_reg_state nosleep; ++ u8 wakeup_threshold; ++ u8 doze_holdoff; ++ u8 doze_interval; ++}; ++ ++struct rmi_device_platform_data_spi { ++ u32 block_delay_us; ++ u32 split_read_block_delay_us; ++ u32 read_delay_us; ++ u32 write_delay_us; ++ u32 split_read_byte_delay_us; ++ u32 pre_delay_us; ++ u32 post_delay_us; ++ u8 bits_per_word; ++ u16 mode; ++ void *cs_assert_data; ++ int (*cs_assert)(const void *, const bool); ++}; ++ ++struct rmi_device_platform_data { ++ int reset_delay_ms; ++ int irq; ++ struct rmi_device_platform_data_spi spi_data; ++ struct rmi_2d_sensor_platform_data sensor_pdata; ++ struct rmi_f01_power_management power_management; ++ struct rmi_f30_data f30_data; ++}; ++ ++enum synaptics_pkt_type { ++ SYN_NEWABS = 0, ++ SYN_NEWABS_STRICT = 1, ++ SYN_NEWABS_RELAXED = 2, ++ SYN_OLDABS = 3, ++}; ++ ++struct synaptics_hw_state { ++ int x; ++ int y; ++ int z; ++ int w; ++ unsigned int left: 1; ++ unsigned int right: 1; ++ unsigned int middle: 1; ++ unsigned int up: 1; ++ unsigned int down: 1; ++ u8 ext_buttons; ++ s8 scroll; ++}; ++ ++struct synaptics_device_info { ++ u32 model_id; ++ u32 firmware_id; ++ u32 board_id; ++ u32 capabilities; ++ u32 ext_cap; ++ u32 ext_cap_0c; ++ u32 ext_cap_10; ++ u32 identity; ++ u32 x_res; ++ u32 y_res; ++ u32 x_max; ++ u32 y_max; ++ u32 x_min; ++ u32 y_min; ++}; ++ ++struct synaptics_data { ++ struct synaptics_device_info info; ++ enum synaptics_pkt_type pkt_type; ++ u8 mode; ++ int scroll; ++ bool absolute_mode; ++ bool disable_gesture; ++ struct serio *pt_port; ++ struct synaptics_hw_state agm; ++ unsigned int agm_count; ++ long unsigned int press_start; ++ bool press; ++ bool report_press; ++ bool is_forcepad; ++}; ++ ++struct min_max_quirk { ++ const char * const *pnp_ids; ++ struct { ++ u32 min; ++ u32 max; ++ } board_id; ++ u32 x_min; ++ u32 x_max; ++ u32 y_min; ++ u32 y_max; ++}; ++ ++enum { ++ SYNAPTICS_INTERTOUCH_NOT_SET = 4294967295, ++ SYNAPTICS_INTERTOUCH_OFF = 0, ++ SYNAPTICS_INTERTOUCH_ON = 1, ++}; ++ ++struct focaltech_finger_state { ++ bool active; ++ bool valid; ++ unsigned int x; ++ unsigned int y; ++}; ++ ++struct focaltech_hw_state { ++ struct focaltech_finger_state fingers[5]; ++ unsigned int width; ++ bool pressed; ++}; ++ ++struct focaltech_data { ++ unsigned int x_max; ++ unsigned int y_max; ++ struct focaltech_hw_state state; ++}; ++ ++enum SS4_PACKET_ID { ++ SS4_PACKET_ID_IDLE = 0, ++ SS4_PACKET_ID_ONE = 1, ++ SS4_PACKET_ID_TWO = 2, ++ SS4_PACKET_ID_MULTI = 3, ++ SS4_PACKET_ID_STICK = 4, ++}; ++ ++enum V7_PACKET_ID { ++ V7_PACKET_ID_IDLE = 0, ++ V7_PACKET_ID_TWO = 1, ++ V7_PACKET_ID_MULTI = 2, ++ V7_PACKET_ID_NEW = 3, ++ V7_PACKET_ID_UNKNOWN = 4, ++}; ++ ++struct alps_protocol_info { ++ u16 version; ++ u8 byte0; ++ u8 mask0; ++ unsigned int flags; ++}; ++ ++struct alps_model_info { ++ u8 signature[3]; ++ struct alps_protocol_info 
protocol_info; ++}; ++ ++struct alps_nibble_commands { ++ int command; ++ unsigned char data; ++}; ++ ++struct alps_bitmap_point { ++ int start_bit; ++ int num_bits; ++}; ++ ++struct alps_fields { ++ unsigned int x_map; ++ unsigned int y_map; ++ unsigned int fingers; ++ int pressure; ++ struct input_mt_pos st; ++ struct input_mt_pos mt[4]; ++ unsigned int first_mp: 1; ++ unsigned int is_mp: 1; ++ unsigned int left: 1; ++ unsigned int right: 1; ++ unsigned int middle: 1; ++ unsigned int ts_left: 1; ++ unsigned int ts_right: 1; ++ unsigned int ts_middle: 1; ++}; ++ ++struct alps_data { ++ struct psmouse *psmouse; ++ struct input_dev *dev2; ++ struct input_dev *dev3; ++ char phys2[32]; ++ char phys3[32]; ++ struct delayed_work dev3_register_work; ++ const struct alps_nibble_commands *nibble_commands; ++ int addr_command; ++ u16 proto_version; ++ u8 byte0; ++ u8 mask0; ++ u8 dev_id[3]; ++ u8 fw_ver[3]; ++ int flags; ++ int x_max; ++ int y_max; ++ int x_bits; ++ int y_bits; ++ unsigned int x_res; ++ unsigned int y_res; ++ int (*hw_init)(struct psmouse *); ++ void (*process_packet)(struct psmouse *); ++ int (*decode_fields)(struct alps_fields *, unsigned char *, struct psmouse *); ++ void (*set_abs_params)(struct alps_data *, struct input_dev *); ++ int prev_fin; ++ int multi_packet; ++ int second_touch; ++ unsigned char multi_data[6]; ++ struct alps_fields f; ++ u8 quirks; ++ struct timer_list timer; ++}; ++ ++struct byd_data { ++ struct timer_list timer; ++ struct psmouse *psmouse; ++ s32 abs_x; ++ s32 abs_y; ++ volatile long unsigned int last_touch_time; ++ bool btn_left; ++ bool btn_right; ++ bool touch; ++}; ++ ++struct finger_pos { ++ unsigned int x; ++ unsigned int y; ++}; ++ ++struct elantech_device_info { ++ unsigned char capabilities[3]; ++ unsigned char samples[3]; ++ unsigned char debug; ++ unsigned char hw_version; ++ unsigned int fw_version; ++ unsigned int x_res; ++ unsigned int y_res; ++ unsigned int bus; ++ bool paritycheck; ++ bool jumpy_cursor; ++ bool reports_pressure; ++ bool crc_enabled; ++ bool set_hw_resolution; ++ bool has_trackpoint; ++ int (*send_cmd)(struct psmouse *, unsigned char, unsigned char *); ++}; ++ ++struct elantech_data { ++ struct input_dev *tp_dev; ++ char tp_phys[32]; ++ unsigned char reg_07; ++ unsigned char reg_10; ++ unsigned char reg_11; ++ unsigned char reg_20; ++ unsigned char reg_21; ++ unsigned char reg_22; ++ unsigned char reg_23; ++ unsigned char reg_24; ++ unsigned char reg_25; ++ unsigned char reg_26; ++ unsigned int single_finger_reports; ++ unsigned int y_max; ++ unsigned int width; ++ struct finger_pos mt[5]; ++ unsigned char parity[256]; ++ struct elantech_device_info info; ++ void (*original_set_rate)(struct psmouse *, unsigned int); ++}; ++ ++enum tp_mode { ++ IAP_MODE = 1, ++ MAIN_MODE = 2, ++}; ++ ++struct elan_transport_ops { ++ int (*initialize)(struct i2c_client *); ++ int (*sleep_control)(struct i2c_client *, bool); ++ int (*power_control)(struct i2c_client *, bool); ++ int (*set_mode)(struct i2c_client *, u8); ++ int (*calibrate)(struct i2c_client *); ++ int (*calibrate_result)(struct i2c_client *, u8 *); ++ int (*get_baseline_data)(struct i2c_client *, bool, u8 *); ++ int (*get_version)(struct i2c_client *, bool, u8 *); ++ int (*get_sm_version)(struct i2c_client *, u16 *, u8 *, u8 *); ++ int (*get_checksum)(struct i2c_client *, bool, u16 *); ++ int (*get_product_id)(struct i2c_client *, u16 *); ++ int (*get_max)(struct i2c_client *, unsigned int *, unsigned int *); ++ int (*get_resolution)(struct i2c_client *, u8 *, u8 *); ++ 
int (*get_num_traces)(struct i2c_client *, unsigned int *, unsigned int *); ++ int (*iap_get_mode)(struct i2c_client *, enum tp_mode *); ++ int (*iap_reset)(struct i2c_client *); ++ int (*prepare_fw_update)(struct i2c_client *); ++ int (*write_fw_block)(struct i2c_client *, const u8 *, u16, int); ++ int (*finish_fw_update)(struct i2c_client *, struct completion *); ++ int (*get_report)(struct i2c_client *, u8 *); ++ int (*get_pressure_adjustment)(struct i2c_client *, int *); ++ int (*get_pattern)(struct i2c_client *, u8 *); ++}; ++ ++struct elantech_attr_data { ++ size_t field_offset; ++ unsigned char reg; ++}; ++ ++enum { ++ ELANTECH_SMBUS_NOT_SET = 4294967295, ++ ELANTECH_SMBUS_OFF = 0, ++ ELANTECH_SMBUS_ON = 1, ++}; ++ ++struct ps2pp_info { ++ u8 model; ++ u8 kind; ++ u16 features; ++}; ++ ++struct fsp_data { ++ unsigned char ver; ++ unsigned char rev; ++ unsigned int buttons; ++ unsigned int flags; ++ bool vscroll; ++ bool hscroll; ++ unsigned char last_reg; ++ unsigned char last_val; ++ unsigned int last_mt_fgr; ++}; ++ ++struct trackpoint_data { ++ u8 variant_id; ++ u8 firmware_id; ++ u8 sensitivity; ++ u8 speed; ++ u8 inertia; ++ u8 reach; ++ u8 draghys; ++ u8 mindrag; ++ u8 thresh; ++ u8 upthresh; ++ u8 ztime; ++ u8 jenks; ++ u8 drift_time; ++ bool press_to_select; ++ bool skipback; ++ bool ext_dev; ++}; ++ ++struct trackpoint_attr_data { ++ size_t field_offset; ++ u8 command; ++ u8 mask; ++ bool inverted; ++ u8 power_on_default; ++}; ++ ++struct cytp_contact { ++ int x; ++ int y; ++ int z; ++}; ++ ++struct cytp_report_data { ++ int contact_cnt; ++ struct cytp_contact contacts[2]; ++ unsigned int left: 1; ++ unsigned int right: 1; ++ unsigned int middle: 1; ++ unsigned int tap: 1; ++}; ++ ++struct cytp_data { ++ int fw_version; ++ int pkt_size; ++ int mode; ++ int tp_min_pressure; ++ int tp_max_pressure; ++ int tp_width; ++ int tp_high; ++ int tp_max_abs_x; ++ int tp_max_abs_y; ++ int tp_res_x; ++ int tp_res_y; ++ int tp_metrics_supported; ++}; ++ ++struct psmouse_smbus_dev { ++ struct i2c_board_info board; ++ struct psmouse *psmouse; ++ struct i2c_client *client; ++ struct list_head node; ++ bool dead; ++ bool need_deactivate; ++}; ++ ++struct psmouse_smbus_removal_work { ++ struct work_struct work; ++ struct i2c_client *client; ++}; ++ ++struct rtc; ++ ++struct trace_event_raw_rtc_time_alarm_class { ++ struct trace_entry ent; ++ time64_t secs; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rtc_irq_set_freq { ++ struct trace_entry ent; ++ int freq; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rtc_irq_set_state { ++ struct trace_entry ent; ++ int enabled; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rtc_alarm_irq_enable { ++ struct trace_entry ent; ++ unsigned int enabled; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rtc_offset_class { ++ struct trace_entry ent; ++ long int offset; ++ int err; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_rtc_timer_class { ++ struct trace_entry ent; ++ struct rtc_timer *timer; ++ ktime_t expires; ++ ktime_t period; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_rtc_time_alarm_class {}; ++ ++struct trace_event_data_offsets_rtc_irq_set_freq {}; ++ ++struct trace_event_data_offsets_rtc_irq_set_state {}; ++ ++struct trace_event_data_offsets_rtc_alarm_irq_enable {}; ++ ++struct trace_event_data_offsets_rtc_offset_class {}; ++ ++struct trace_event_data_offsets_rtc_timer_class {}; ++ ++enum { ++ none = 0, ++ day = 1, ++ month = 2, ++ year = 3, ++}; ++ ++struct 
nvmem_cell_info { ++ const char *name; ++ unsigned int offset; ++ unsigned int bytes; ++ unsigned int bit_offset; ++ unsigned int nbits; ++}; ++ ++typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); ++ ++typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); ++ ++struct nvmem_config { ++ struct device *dev; ++ const char *name; ++ int id; ++ struct module *owner; ++ const struct nvmem_cell_info *cells; ++ int ncells; ++ bool read_only; ++ bool root_only; ++ nvmem_reg_read_t reg_read; ++ nvmem_reg_write_t reg_write; ++ int size; ++ int word_size; ++ int stride; ++ void *priv; ++ bool compat; ++ struct device *base_dev; ++}; ++ ++struct pl031_vendor_data { ++ struct rtc_class_ops ops; ++ bool clockwatch; ++ bool st_weekday; ++ long unsigned int irqflags; ++}; ++ ++struct pl031_local { ++ struct pl031_vendor_data *vendor; ++ struct rtc_device *rtc; ++ void *base; ++}; ++ ++struct i2c_devinfo { ++ struct list_head list; ++ int busnum; ++ struct i2c_board_info board_info; ++}; ++ ++struct i2c_device_identity { ++ u16 manufacturer_id; ++ u16 part_id; ++ u8 die_revision; ++}; ++ ++struct i2c_timings { ++ u32 bus_freq_hz; ++ u32 scl_rise_ns; ++ u32 scl_fall_ns; ++ u32 scl_int_delay_ns; ++ u32 sda_fall_ns; ++ u32 sda_hold_ns; ++}; ++ ++struct trace_event_raw_i2c_write { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 msg_nr; ++ __u16 addr; ++ __u16 flags; ++ __u16 len; ++ u32 __data_loc_buf; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_i2c_read { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 msg_nr; ++ __u16 addr; ++ __u16 flags; ++ __u16 len; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_i2c_reply { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 msg_nr; ++ __u16 addr; ++ __u16 flags; ++ __u16 len; ++ u32 __data_loc_buf; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_i2c_result { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 nr_msgs; ++ __s16 ret; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_i2c_write { ++ u32 buf; ++}; ++ ++struct trace_event_data_offsets_i2c_read {}; ++ ++struct trace_event_data_offsets_i2c_reply { ++ u32 buf; ++}; ++ ++struct trace_event_data_offsets_i2c_result {}; ++ ++struct class_compat___2; ++ ++struct i2c_cmd_arg { ++ unsigned int cmd; ++ void *arg; ++}; ++ ++struct i2c_smbus_alert_setup { ++ int irq; ++}; ++ ++struct trace_event_raw_smbus_write { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 addr; ++ __u16 flags; ++ __u8 command; ++ __u8 len; ++ __u32 protocol; ++ __u8 buf[34]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_smbus_read { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 flags; ++ __u16 addr; ++ __u8 command; ++ __u32 protocol; ++ __u8 buf[34]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_smbus_reply { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 addr; ++ __u16 flags; ++ __u8 command; ++ __u8 len; ++ __u32 protocol; ++ __u8 buf[34]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_smbus_result { ++ struct trace_entry ent; ++ int adapter_nr; ++ __u16 addr; ++ __u16 flags; ++ __u8 read_write; ++ __u8 command; ++ __s16 res; ++ __u32 protocol; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_smbus_write {}; ++ ++struct trace_event_data_offsets_smbus_read {}; ++ ++struct trace_event_data_offsets_smbus_reply {}; ++ ++struct trace_event_data_offsets_smbus_result {}; ++ ++struct i2c_acpi_handler_data { ++ struct acpi_connection_info info; ++ struct i2c_adapter *adapter; ++}; ++ ++struct gsb_buffer { ++ u8 status; 
++ u8 len; ++ union { ++ u16 wdata; ++ u8 bdata; ++ u8 data[0]; ++ }; ++}; ++ ++struct i2c_acpi_lookup { ++ struct i2c_board_info *info; ++ acpi_handle adapter_handle; ++ acpi_handle device_handle; ++ acpi_handle search_handle; ++ int n; ++ int index; ++ u32 speed; ++ u32 min_speed; ++ u32 force_speed; ++}; ++ ++struct i2c_algo_bit_data { ++ void *data; ++ void (*setsda)(void *, int); ++ void (*setscl)(void *, int); ++ int (*getsda)(void *); ++ int (*getscl)(void *); ++ int (*pre_xfer)(struct i2c_adapter *); ++ void (*post_xfer)(struct i2c_adapter *); ++ int udelay; ++ int timeout; ++}; ++ ++struct pps_ktime { ++ __s64 sec; ++ __s32 nsec; ++ __u32 flags; ++}; ++ ++struct pps_ktime_compat { ++ __s64 sec; ++ __s32 nsec; ++ __u32 flags; ++}; ++ ++struct pps_kinfo { ++ __u32 assert_sequence; ++ __u32 clear_sequence; ++ struct pps_ktime assert_tu; ++ struct pps_ktime clear_tu; ++ int current_mode; ++}; ++ ++struct pps_kinfo_compat { ++ __u32 assert_sequence; ++ __u32 clear_sequence; ++ struct pps_ktime_compat assert_tu; ++ struct pps_ktime_compat clear_tu; ++ int current_mode; ++} __attribute__((packed)); ++ ++struct pps_kparams { ++ int api_version; ++ int mode; ++ struct pps_ktime assert_off_tu; ++ struct pps_ktime clear_off_tu; ++}; ++ ++struct pps_fdata { ++ struct pps_kinfo info; ++ struct pps_ktime timeout; ++}; ++ ++struct pps_fdata_compat { ++ struct pps_kinfo_compat info; ++ struct pps_ktime_compat timeout; ++} __attribute__((packed)); ++ ++struct pps_bind_args { ++ int tsformat; ++ int edge; ++ int consumer; ++}; ++ ++struct pps_device; ++ ++struct pps_source_info { ++ char name[32]; ++ char path[32]; ++ int mode; ++ void (*echo)(struct pps_device *, int, void *); ++ struct module *owner; ++ struct device *dev; ++}; ++ ++struct pps_device { ++ struct pps_source_info info; ++ struct pps_kparams params; ++ __u32 assert_sequence; ++ __u32 clear_sequence; ++ struct pps_ktime assert_tu; ++ struct pps_ktime clear_tu; ++ int current_mode; ++ unsigned int last_ev; ++ wait_queue_head_t queue; ++ unsigned int id; ++ const void *lookup_cookie; ++ struct cdev cdev; ++ struct device *dev; ++ struct fasync_struct *async_queue; ++ spinlock_t lock; ++}; ++ ++struct pps_event_time { ++ struct timespec64 ts_real; ++}; ++ ++struct ptp_extts_event { ++ struct ptp_clock_time t; ++ unsigned int index; ++ unsigned int flags; ++ unsigned int rsv[2]; ++}; ++ ++enum ptp_clock_events { ++ PTP_CLOCK_ALARM = 0, ++ PTP_CLOCK_EXTTS = 1, ++ PTP_CLOCK_PPS = 2, ++ PTP_CLOCK_PPSUSR = 3, ++}; ++ ++struct ptp_clock_event { ++ int type; ++ int index; ++ union { ++ u64 timestamp; ++ struct pps_event_time pps_times; ++ }; ++}; ++ ++struct timestamp_event_queue { ++ struct ptp_extts_event buf[128]; ++ int head; ++ int tail; ++ spinlock_t lock; ++}; ++ ++struct ptp_clock___2 { ++ struct posix_clock clock; ++ struct device dev; ++ struct ptp_clock_info *info; ++ dev_t devid; ++ int index; ++ struct pps_device *pps_source; ++ long int dialed_frequency; ++ struct timestamp_event_queue tsevq; ++ struct mutex tsevq_mux; ++ struct mutex pincfg_mux; ++ wait_queue_head_t tsev_wq; ++ int defunct; ++ struct device_attribute *pin_dev_attr; ++ struct attribute **pin_attr; ++ struct attribute_group pin_attr_group; ++ const struct attribute_group *pin_attr_groups[2]; ++ struct kthread_worker *kworker; ++ struct kthread_delayed_work aux_work; ++}; ++ ++struct ptp_clock_caps { ++ int max_adj; ++ int n_alarm; ++ int n_ext_ts; ++ int n_per_out; ++ int pps; ++ int n_pins; ++ int cross_timestamping; ++ int rsv[13]; ++}; ++ ++struct 
ptp_sys_offset { ++ unsigned int n_samples; ++ unsigned int rsv[3]; ++ struct ptp_clock_time ts[51]; ++}; ++ ++struct ptp_sys_offset_precise { ++ struct ptp_clock_time device; ++ struct ptp_clock_time sys_realtime; ++ struct ptp_clock_time sys_monoraw; ++ unsigned int rsv[4]; ++}; ++ ++struct gpio_restart { ++ struct gpio_desc___2 *reset_gpio; ++ struct notifier_block restart_handler; ++ u32 active_delay_ms; ++ u32 inactive_delay_ms; ++ u32 wait_delay_ms; ++}; ++ ++enum vexpress_reset_func { ++ FUNC_RESET = 0, ++ FUNC_SHUTDOWN = 1, ++ FUNC_REBOOT = 2, ++}; ++ ++struct syscon_reboot_context { ++ struct regmap *map; ++ u32 offset; ++ u32 mask; ++ struct notifier_block restart_handler; ++}; ++ ++enum power_supply_property { ++ POWER_SUPPLY_PROP_STATUS = 0, ++ POWER_SUPPLY_PROP_CHARGE_TYPE = 1, ++ POWER_SUPPLY_PROP_HEALTH = 2, ++ POWER_SUPPLY_PROP_PRESENT = 3, ++ POWER_SUPPLY_PROP_ONLINE = 4, ++ POWER_SUPPLY_PROP_AUTHENTIC = 5, ++ POWER_SUPPLY_PROP_TECHNOLOGY = 6, ++ POWER_SUPPLY_PROP_CYCLE_COUNT = 7, ++ POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, ++ POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, ++ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, ++ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, ++ POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, ++ POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, ++ POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, ++ POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, ++ POWER_SUPPLY_PROP_CURRENT_MAX = 16, ++ POWER_SUPPLY_PROP_CURRENT_NOW = 17, ++ POWER_SUPPLY_PROP_CURRENT_AVG = 18, ++ POWER_SUPPLY_PROP_CURRENT_BOOT = 19, ++ POWER_SUPPLY_PROP_POWER_NOW = 20, ++ POWER_SUPPLY_PROP_POWER_AVG = 21, ++ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, ++ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, ++ POWER_SUPPLY_PROP_CHARGE_FULL = 24, ++ POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, ++ POWER_SUPPLY_PROP_CHARGE_NOW = 26, ++ POWER_SUPPLY_PROP_CHARGE_AVG = 27, ++ POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, ++ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, ++ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, ++ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, ++ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, ++ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 35, ++ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 36, ++ POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 37, ++ POWER_SUPPLY_PROP_ENERGY_FULL = 38, ++ POWER_SUPPLY_PROP_ENERGY_EMPTY = 39, ++ POWER_SUPPLY_PROP_ENERGY_NOW = 40, ++ POWER_SUPPLY_PROP_ENERGY_AVG = 41, ++ POWER_SUPPLY_PROP_CAPACITY = 42, ++ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 43, ++ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 44, ++ POWER_SUPPLY_PROP_CAPACITY_LEVEL = 45, ++ POWER_SUPPLY_PROP_TEMP = 46, ++ POWER_SUPPLY_PROP_TEMP_MAX = 47, ++ POWER_SUPPLY_PROP_TEMP_MIN = 48, ++ POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 49, ++ POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 50, ++ POWER_SUPPLY_PROP_TEMP_AMBIENT = 51, ++ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 52, ++ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 53, ++ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 54, ++ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 55, ++ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 56, ++ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 57, ++ POWER_SUPPLY_PROP_TYPE = 58, ++ POWER_SUPPLY_PROP_USB_TYPE = 59, ++ POWER_SUPPLY_PROP_SCOPE = 60, ++ POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 61, ++ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 62, ++ POWER_SUPPLY_PROP_CALIBRATE = 63, ++ POWER_SUPPLY_PROP_MODEL_NAME = 64, ++ POWER_SUPPLY_PROP_MANUFACTURER = 65, ++ POWER_SUPPLY_PROP_SERIAL_NUMBER = 66, ++}; ++ ++enum power_supply_type { ++ POWER_SUPPLY_TYPE_UNKNOWN 
= 0, ++ POWER_SUPPLY_TYPE_BATTERY = 1, ++ POWER_SUPPLY_TYPE_UPS = 2, ++ POWER_SUPPLY_TYPE_MAINS = 3, ++ POWER_SUPPLY_TYPE_USB = 4, ++ POWER_SUPPLY_TYPE_USB_DCP = 5, ++ POWER_SUPPLY_TYPE_USB_CDP = 6, ++ POWER_SUPPLY_TYPE_USB_ACA = 7, ++ POWER_SUPPLY_TYPE_USB_TYPE_C = 8, ++ POWER_SUPPLY_TYPE_USB_PD = 9, ++ POWER_SUPPLY_TYPE_USB_PD_DRP = 10, ++ POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, ++}; ++ ++enum power_supply_usb_type { ++ POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, ++ POWER_SUPPLY_USB_TYPE_SDP = 1, ++ POWER_SUPPLY_USB_TYPE_DCP = 2, ++ POWER_SUPPLY_USB_TYPE_CDP = 3, ++ POWER_SUPPLY_USB_TYPE_ACA = 4, ++ POWER_SUPPLY_USB_TYPE_C = 5, ++ POWER_SUPPLY_USB_TYPE_PD = 6, ++ POWER_SUPPLY_USB_TYPE_PD_DRP = 7, ++ POWER_SUPPLY_USB_TYPE_PD_PPS = 8, ++ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, ++}; ++ ++enum power_supply_notifier_events { ++ PSY_EVENT_PROP_CHANGED = 0, ++}; ++ ++union power_supply_propval { ++ int intval; ++ const char *strval; ++}; ++ ++struct power_supply_config { ++ struct device_node *of_node; ++ struct fwnode_handle *fwnode; ++ void *drv_data; ++ char **supplied_to; ++ size_t num_supplicants; ++}; ++ ++struct power_supply; ++ ++struct power_supply_desc { ++ const char *name; ++ enum power_supply_type type; ++ enum power_supply_usb_type *usb_types; ++ size_t num_usb_types; ++ enum power_supply_property *properties; ++ size_t num_properties; ++ int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); ++ int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); ++ int (*property_is_writeable)(struct power_supply *, enum power_supply_property); ++ void (*external_power_changed)(struct power_supply *); ++ void (*set_charged)(struct power_supply *); ++ bool no_thermal; ++ int use_for_apm; ++}; ++ ++struct power_supply { ++ const struct power_supply_desc *desc; ++ char **supplied_to; ++ size_t num_supplicants; ++ char **supplied_from; ++ size_t num_supplies; ++ struct device_node *of_node; ++ void *drv_data; ++ struct device dev; ++ struct work_struct changed_work; ++ struct delayed_work deferred_register_work; ++ spinlock_t changed_lock; ++ bool changed; ++ bool initialized; ++ bool removing; ++ atomic_t use_cnt; ++ struct thermal_zone_device *tzd; ++ struct thermal_cooling_device *tcd; ++ struct led_trigger *charging_full_trig; ++ char *charging_full_trig_name; ++ struct led_trigger *charging_trig; ++ char *charging_trig_name; ++ struct led_trigger *full_trig; ++ char *full_trig_name; ++ struct led_trigger *online_trig; ++ char *online_trig_name; ++ struct led_trigger *charging_blink_full_solid_trig; ++ char *charging_blink_full_solid_trig_name; ++}; ++ ++struct power_supply_battery_info { ++ int energy_full_design_uwh; ++ int charge_full_design_uah; ++ int voltage_min_design_uv; ++ int precharge_current_ua; ++ int charge_term_current_ua; ++ int constant_charge_current_max_ua; ++ int constant_charge_voltage_max_uv; ++}; ++ ++struct psy_am_i_supplied_data { ++ struct power_supply *psy; ++ unsigned int count; ++}; ++ ++enum { ++ POWER_SUPPLY_STATUS_UNKNOWN = 0, ++ POWER_SUPPLY_STATUS_CHARGING = 1, ++ POWER_SUPPLY_STATUS_DISCHARGING = 2, ++ POWER_SUPPLY_STATUS_NOT_CHARGING = 3, ++ POWER_SUPPLY_STATUS_FULL = 4, ++}; ++ ++enum hwmon_sensor_types { ++ hwmon_chip = 0, ++ hwmon_temp = 1, ++ hwmon_in = 2, ++ hwmon_curr = 3, ++ hwmon_power = 4, ++ hwmon_energy = 5, ++ hwmon_humidity = 6, ++ hwmon_fan = 7, ++ hwmon_pwm = 8, ++ hwmon_max = 9, ++}; ++ ++enum hwmon_chip_attributes { ++ hwmon_chip_temp_reset_history = 
0, ++ hwmon_chip_in_reset_history = 1, ++ hwmon_chip_curr_reset_history = 2, ++ hwmon_chip_power_reset_history = 3, ++ hwmon_chip_register_tz = 4, ++ hwmon_chip_update_interval = 5, ++ hwmon_chip_alarms = 6, ++}; ++ ++enum hwmon_temp_attributes { ++ hwmon_temp_input = 0, ++ hwmon_temp_type = 1, ++ hwmon_temp_lcrit = 2, ++ hwmon_temp_lcrit_hyst = 3, ++ hwmon_temp_min = 4, ++ hwmon_temp_min_hyst = 5, ++ hwmon_temp_max = 6, ++ hwmon_temp_max_hyst = 7, ++ hwmon_temp_crit = 8, ++ hwmon_temp_crit_hyst = 9, ++ hwmon_temp_emergency = 10, ++ hwmon_temp_emergency_hyst = 11, ++ hwmon_temp_alarm = 12, ++ hwmon_temp_lcrit_alarm = 13, ++ hwmon_temp_min_alarm = 14, ++ hwmon_temp_max_alarm = 15, ++ hwmon_temp_crit_alarm = 16, ++ hwmon_temp_emergency_alarm = 17, ++ hwmon_temp_fault = 18, ++ hwmon_temp_offset = 19, ++ hwmon_temp_label = 20, ++ hwmon_temp_lowest = 21, ++ hwmon_temp_highest = 22, ++ hwmon_temp_reset_history = 23, ++}; ++ ++enum hwmon_in_attributes { ++ hwmon_in_input = 0, ++ hwmon_in_min = 1, ++ hwmon_in_max = 2, ++ hwmon_in_lcrit = 3, ++ hwmon_in_crit = 4, ++ hwmon_in_average = 5, ++ hwmon_in_lowest = 6, ++ hwmon_in_highest = 7, ++ hwmon_in_reset_history = 8, ++ hwmon_in_label = 9, ++ hwmon_in_alarm = 10, ++ hwmon_in_min_alarm = 11, ++ hwmon_in_max_alarm = 12, ++ hwmon_in_lcrit_alarm = 13, ++ hwmon_in_crit_alarm = 14, ++}; ++ ++enum hwmon_curr_attributes { ++ hwmon_curr_input = 0, ++ hwmon_curr_min = 1, ++ hwmon_curr_max = 2, ++ hwmon_curr_lcrit = 3, ++ hwmon_curr_crit = 4, ++ hwmon_curr_average = 5, ++ hwmon_curr_lowest = 6, ++ hwmon_curr_highest = 7, ++ hwmon_curr_reset_history = 8, ++ hwmon_curr_label = 9, ++ hwmon_curr_alarm = 10, ++ hwmon_curr_min_alarm = 11, ++ hwmon_curr_max_alarm = 12, ++ hwmon_curr_lcrit_alarm = 13, ++ hwmon_curr_crit_alarm = 14, ++}; ++ ++enum hwmon_power_attributes { ++ hwmon_power_average = 0, ++ hwmon_power_average_interval = 1, ++ hwmon_power_average_interval_max = 2, ++ hwmon_power_average_interval_min = 3, ++ hwmon_power_average_highest = 4, ++ hwmon_power_average_lowest = 5, ++ hwmon_power_average_max = 6, ++ hwmon_power_average_min = 7, ++ hwmon_power_input = 8, ++ hwmon_power_input_highest = 9, ++ hwmon_power_input_lowest = 10, ++ hwmon_power_reset_history = 11, ++ hwmon_power_accuracy = 12, ++ hwmon_power_cap = 13, ++ hwmon_power_cap_hyst = 14, ++ hwmon_power_cap_max = 15, ++ hwmon_power_cap_min = 16, ++ hwmon_power_min = 17, ++ hwmon_power_max = 18, ++ hwmon_power_crit = 19, ++ hwmon_power_lcrit = 20, ++ hwmon_power_label = 21, ++ hwmon_power_alarm = 22, ++ hwmon_power_cap_alarm = 23, ++ hwmon_power_min_alarm = 24, ++ hwmon_power_max_alarm = 25, ++ hwmon_power_lcrit_alarm = 26, ++ hwmon_power_crit_alarm = 27, ++}; ++ ++enum hwmon_energy_attributes { ++ hwmon_energy_input = 0, ++ hwmon_energy_label = 1, ++}; ++ ++enum hwmon_humidity_attributes { ++ hwmon_humidity_input = 0, ++ hwmon_humidity_label = 1, ++ hwmon_humidity_min = 2, ++ hwmon_humidity_min_hyst = 3, ++ hwmon_humidity_max = 4, ++ hwmon_humidity_max_hyst = 5, ++ hwmon_humidity_alarm = 6, ++ hwmon_humidity_fault = 7, ++}; ++ ++enum hwmon_fan_attributes { ++ hwmon_fan_input = 0, ++ hwmon_fan_label = 1, ++ hwmon_fan_min = 2, ++ hwmon_fan_max = 3, ++ hwmon_fan_div = 4, ++ hwmon_fan_pulses = 5, ++ hwmon_fan_target = 6, ++ hwmon_fan_alarm = 7, ++ hwmon_fan_min_alarm = 8, ++ hwmon_fan_max_alarm = 9, ++ hwmon_fan_fault = 10, ++}; ++ ++enum hwmon_pwm_attributes { ++ hwmon_pwm_input = 0, ++ hwmon_pwm_enable = 1, ++ hwmon_pwm_mode = 2, ++ hwmon_pwm_freq = 3, ++}; ++ ++struct hwmon_ops { ++ umode_t 
(*is_visible)(const void *, enum hwmon_sensor_types, u32, int); ++ int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); ++ int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); ++ int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); ++}; ++ ++struct hwmon_channel_info { ++ enum hwmon_sensor_types type; ++ const u32 *config; ++}; ++ ++struct hwmon_chip_info { ++ const struct hwmon_ops *ops; ++ const struct hwmon_channel_info **info; ++}; ++ ++struct thermal_zone_of_device_ops { ++ int (*get_temp)(void *, int *); ++ int (*get_trend)(void *, int, enum thermal_trend *); ++ int (*set_trips)(void *, int, int); ++ int (*set_emul_temp)(void *, int); ++ int (*set_trip_temp)(void *, int, int); ++}; ++ ++struct hwmon_device { ++ const char *name; ++ struct device dev; ++ const struct hwmon_chip_info *chip; ++ struct attribute_group group; ++ const struct attribute_group **groups; ++}; ++ ++struct hwmon_device_attribute { ++ struct device_attribute dev_attr; ++ const struct hwmon_ops *ops; ++ enum hwmon_sensor_types type; ++ u32 attr; ++ int index; ++ char name[32]; ++}; ++ ++struct hwmon_thermal_data { ++ struct hwmon_device *hwdev; ++ int index; ++}; ++ ++enum events { ++ THERMAL_AUX0 = 0, ++ THERMAL_AUX1 = 1, ++ THERMAL_CRITICAL = 2, ++ THERMAL_DEV_FAULT = 3, ++}; ++ ++enum { ++ THERMAL_GENL_ATTR_UNSPEC = 0, ++ THERMAL_GENL_ATTR_EVENT = 1, ++ __THERMAL_GENL_ATTR_MAX = 2, ++}; ++ ++enum { ++ THERMAL_GENL_CMD_UNSPEC = 0, ++ THERMAL_GENL_CMD_EVENT = 1, ++ __THERMAL_GENL_CMD_MAX = 2, ++}; ++ ++struct thermal_genl_event { ++ u32 orig; ++ enum events event; ++}; ++ ++struct trace_event_raw_thermal_temperature { ++ struct trace_entry ent; ++ u32 __data_loc_thermal_zone; ++ int id; ++ int temp_prev; ++ int temp; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_cdev_update { ++ struct trace_entry ent; ++ u32 __data_loc_type; ++ long unsigned int target; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_thermal_zone_trip { ++ struct trace_entry ent; ++ u32 __data_loc_thermal_zone; ++ int id; ++ int trip; ++ enum thermal_trip_type trip_type; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_thermal_power_cpu_get_power { ++ struct trace_entry ent; ++ u32 __data_loc_cpumask; ++ long unsigned int freq; ++ u32 __data_loc_load; ++ size_t load_len; ++ u32 dynamic_power; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_thermal_power_cpu_limit { ++ struct trace_entry ent; ++ u32 __data_loc_cpumask; ++ unsigned int freq; ++ long unsigned int cdev_state; ++ u32 power; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_thermal_temperature { ++ u32 thermal_zone; ++}; ++ ++struct trace_event_data_offsets_cdev_update { ++ u32 type; ++}; ++ ++struct trace_event_data_offsets_thermal_zone_trip { ++ u32 thermal_zone; ++}; ++ ++struct trace_event_data_offsets_thermal_power_cpu_get_power { ++ u32 cpumask; ++ u32 load; ++}; ++ ++struct trace_event_data_offsets_thermal_power_cpu_limit { ++ u32 cpumask; ++}; ++ ++struct thermal_instance { ++ int id; ++ char name[20]; ++ struct thermal_zone_device *tz; ++ struct thermal_cooling_device *cdev; ++ int trip; ++ bool initialized; ++ long unsigned int upper; ++ long unsigned int lower; ++ long unsigned int target; ++ char attr_name[20]; ++ struct device_attribute attr; ++ char weight_attr_name[20]; ++ struct device_attribute weight_attr; ++ struct list_head tz_node; ++ struct list_head cdev_node; ++ unsigned int weight; ++}; ++ ++struct thermal_hwmon_device { ++ char type[20]; ++ struct 
device *device; ++ int count; ++ struct list_head tz_list; ++ struct list_head node; ++}; ++ ++struct thermal_hwmon_attr { ++ struct device_attribute attr; ++ char name[16]; ++}; ++ ++struct thermal_hwmon_temp { ++ struct list_head hwmon_node; ++ struct thermal_zone_device *tz; ++ struct thermal_hwmon_attr temp_input; ++ struct thermal_hwmon_attr temp_crit; ++}; ++ ++struct thermal_trip { ++ struct device_node *np; ++ int temperature; ++ int hysteresis; ++ enum thermal_trip_type type; ++}; ++ ++struct __thermal_bind_params { ++ struct device_node *cooling_device; ++ unsigned int trip_id; ++ unsigned int usage; ++ long unsigned int min; ++ long unsigned int max; ++}; ++ ++struct __thermal_zone { ++ enum thermal_device_mode mode; ++ int passive_delay; ++ int polling_delay; ++ int slope; ++ int offset; ++ int ntrips; ++ struct thermal_trip *trips; ++ int num_tbps; ++ struct __thermal_bind_params *tbps; ++ void *sensor_data; ++ const struct thermal_zone_of_device_ops *ops; ++}; ++ ++struct freq_table { ++ u32 frequency; ++ u32 power; ++}; ++ ++struct time_in_idle { ++ u64 time; ++ u64 timestamp; ++}; ++ ++struct cpufreq_cooling_device { ++ int id; ++ u32 last_load; ++ unsigned int cpufreq_state; ++ unsigned int clipped_freq; ++ unsigned int max_level; ++ struct freq_table *freq_table; ++ struct thermal_cooling_device *cdev; ++ struct cpufreq_policy *policy; ++ struct list_head node; ++ struct time_in_idle *idle_time; ++}; ++ ++struct hisi_thermal_sensor { ++ struct thermal_zone_device *tzd; ++ uint32_t id; ++ uint32_t thres_temp; ++}; ++ ++struct hisi_thermal_data { ++ int (*get_temp)(struct hisi_thermal_data *); ++ int (*enable_sensor)(struct hisi_thermal_data *); ++ int (*disable_sensor)(struct hisi_thermal_data *); ++ int (*irq_handler)(struct hisi_thermal_data *); ++ struct platform_device *pdev; ++ struct clk *clk; ++ struct hisi_thermal_sensor sensor; ++ void *regs; ++ int irq; ++}; ++ ++struct watchdog_info { ++ __u32 options; ++ __u32 firmware_version; ++ __u8 identity[32]; ++}; ++ ++struct watchdog_device; ++ ++struct watchdog_ops { ++ struct module *owner; ++ int (*start)(struct watchdog_device *); ++ int (*stop)(struct watchdog_device *); ++ int (*ping)(struct watchdog_device *); ++ unsigned int (*status)(struct watchdog_device *); ++ int (*set_timeout)(struct watchdog_device *, unsigned int); ++ int (*set_pretimeout)(struct watchdog_device *, unsigned int); ++ unsigned int (*get_timeleft)(struct watchdog_device *); ++ int (*restart)(struct watchdog_device *, long unsigned int, void *); ++ long int (*ioctl)(struct watchdog_device *, unsigned int, long unsigned int); ++}; ++ ++struct watchdog_governor; ++ ++struct watchdog_core_data; ++ ++struct watchdog_device { ++ int id; ++ struct device *parent; ++ const struct attribute_group **groups; ++ const struct watchdog_info *info; ++ const struct watchdog_ops *ops; ++ const struct watchdog_governor *gov; ++ unsigned int bootstatus; ++ unsigned int timeout; ++ unsigned int pretimeout; ++ unsigned int min_timeout; ++ unsigned int max_timeout; ++ unsigned int min_hw_heartbeat_ms; ++ unsigned int max_hw_heartbeat_ms; ++ struct notifier_block reboot_nb; ++ struct notifier_block restart_nb; ++ void *driver_data; ++ struct watchdog_core_data *wd_data; ++ long unsigned int status; ++ struct list_head deferred; ++}; ++ ++struct watchdog_governor { ++ const char name[20]; ++ void (*pretimeout)(struct watchdog_device *); ++}; ++ ++struct watchdog_core_data { ++ struct device dev; ++ struct cdev cdev; ++ struct watchdog_device *wdd; ++ struct mutex 
lock; ++ ktime_t last_keepalive; ++ ktime_t last_hw_keepalive; ++ struct hrtimer timer; ++ struct kthread_work work; ++ long unsigned int status; ++}; ++ ++struct mdp_device_descriptor_s { ++ __u32 number; ++ __u32 major; ++ __u32 minor; ++ __u32 raid_disk; ++ __u32 state; ++ __u32 reserved[27]; ++}; ++ ++typedef struct mdp_device_descriptor_s mdp_disk_t; ++ ++struct mdp_superblock_s { ++ __u32 md_magic; ++ __u32 major_version; ++ __u32 minor_version; ++ __u32 patch_version; ++ __u32 gvalid_words; ++ __u32 set_uuid0; ++ __u32 ctime; ++ __u32 level; ++ __u32 size; ++ __u32 nr_disks; ++ __u32 raid_disks; ++ __u32 md_minor; ++ __u32 not_persistent; ++ __u32 set_uuid1; ++ __u32 set_uuid2; ++ __u32 set_uuid3; ++ __u32 gstate_creserved[16]; ++ __u32 utime; ++ __u32 state; ++ __u32 active_disks; ++ __u32 working_disks; ++ __u32 failed_disks; ++ __u32 spare_disks; ++ __u32 sb_csum; ++ __u32 events_lo; ++ __u32 events_hi; ++ __u32 cp_events_lo; ++ __u32 cp_events_hi; ++ __u32 recovery_cp; ++ __u64 reshape_position; ++ __u32 new_level; ++ __u32 delta_disks; ++ __u32 new_layout; ++ __u32 new_chunk; ++ __u32 gstate_sreserved[14]; ++ __u32 layout; ++ __u32 chunk_size; ++ __u32 root_pv; ++ __u32 root_block; ++ __u32 pstate_reserved[60]; ++ mdp_disk_t disks[27]; ++ __u32 reserved[0]; ++ mdp_disk_t this_disk; ++}; ++ ++typedef struct mdp_superblock_s mdp_super_t; ++ ++struct mdp_superblock_1 { ++ __le32 magic; ++ __le32 major_version; ++ __le32 feature_map; ++ __le32 pad0; ++ __u8 set_uuid[16]; ++ char set_name[32]; ++ __le64 ctime; ++ __le32 level; ++ __le32 layout; ++ __le64 size; ++ __le32 chunksize; ++ __le32 raid_disks; ++ union { ++ __le32 bitmap_offset; ++ struct { ++ __le16 offset; ++ __le16 size; ++ } ppl; ++ }; ++ __le32 new_level; ++ __le64 reshape_position; ++ __le32 delta_disks; ++ __le32 new_layout; ++ __le32 new_chunk; ++ __le32 new_offset; ++ __le64 data_offset; ++ __le64 data_size; ++ __le64 super_offset; ++ union { ++ __le64 recovery_offset; ++ __le64 journal_tail; ++ }; ++ __le32 dev_number; ++ __le32 cnt_corrected_read; ++ __u8 device_uuid[16]; ++ __u8 devflags; ++ __u8 bblog_shift; ++ __le16 bblog_size; ++ __le32 bblog_offset; ++ __le64 utime; ++ __le64 events; ++ __le64 resync_offset; ++ __le32 sb_csum; ++ __le32 max_dev; ++ __u8 pad3[32]; ++ __le16 dev_roles[0]; ++}; ++ ++struct mdu_version_s { ++ int major; ++ int minor; ++ int patchlevel; ++}; ++ ++typedef struct mdu_version_s mdu_version_t; ++ ++struct mdu_bitmap_file_s { ++ char pathname[4096]; ++}; ++ ++typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; ++ ++struct mddev; ++ ++struct md_rdev; ++ ++struct md_cluster_operations { ++ int (*join)(struct mddev *, int); ++ int (*leave)(struct mddev *); ++ int (*slot_number)(struct mddev *); ++ int (*resync_info_update)(struct mddev *, sector_t, sector_t); ++ int (*metadata_update_start)(struct mddev *); ++ int (*metadata_update_finish)(struct mddev *); ++ void (*metadata_update_cancel)(struct mddev *); ++ int (*resync_start)(struct mddev *); ++ int (*resync_finish)(struct mddev *); ++ int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); ++ int (*add_new_disk)(struct mddev *, struct md_rdev *); ++ void (*add_new_disk_cancel)(struct mddev *); ++ int (*new_disk_ack)(struct mddev *, bool); ++ int (*remove_disk)(struct mddev *, struct md_rdev *); ++ void (*load_bitmaps)(struct mddev *, int); ++ int (*gather_bitmaps)(struct md_rdev *); ++ int (*lock_all_bitmaps)(struct mddev *); ++ void (*unlock_all_bitmaps)(struct mddev *); ++ void (*update_size)(struct mddev *, sector_t); 
++}; ++ ++struct md_cluster_info; ++ ++struct md_personality; ++ ++struct md_thread; ++ ++struct bitmap; ++ ++struct mddev { ++ void *private; ++ struct md_personality *pers; ++ dev_t unit; ++ int md_minor; ++ struct list_head disks; ++ long unsigned int flags; ++ long unsigned int sb_flags; ++ int suspended; ++ atomic_t active_io; ++ int ro; ++ int sysfs_active; ++ struct gendisk *gendisk; ++ struct kobject kobj; ++ int hold_active; ++ int major_version; ++ int minor_version; ++ int patch_version; ++ int persistent; ++ int external; ++ char metadata_type[17]; ++ int chunk_sectors; ++ time64_t ctime; ++ time64_t utime; ++ int level; ++ int layout; ++ char clevel[16]; ++ int raid_disks; ++ int max_disks; ++ sector_t dev_sectors; ++ sector_t array_sectors; ++ int external_size; ++ __u64 events; ++ int can_decrease_events; ++ char uuid[16]; ++ sector_t reshape_position; ++ int delta_disks; ++ int new_level; ++ int new_layout; ++ int new_chunk_sectors; ++ int reshape_backwards; ++ struct md_thread *thread; ++ struct md_thread *sync_thread; ++ char *last_sync_action; ++ sector_t curr_resync; ++ sector_t curr_resync_completed; ++ long unsigned int resync_mark; ++ sector_t resync_mark_cnt; ++ sector_t curr_mark_cnt; ++ sector_t resync_max_sectors; ++ atomic64_t resync_mismatches; ++ sector_t suspend_lo; ++ sector_t suspend_hi; ++ int sync_speed_min; ++ int sync_speed_max; ++ int parallel_resync; ++ int ok_start_degraded; ++ long unsigned int recovery; ++ int recovery_disabled; ++ int in_sync; ++ struct mutex open_mutex; ++ struct mutex reconfig_mutex; ++ atomic_t active; ++ atomic_t openers; ++ int changed; ++ int degraded; ++ atomic_t recovery_active; ++ wait_queue_head_t recovery_wait; ++ sector_t recovery_cp; ++ sector_t resync_min; ++ sector_t resync_max; ++ struct kernfs_node *sysfs_state; ++ struct kernfs_node *sysfs_action; ++ struct work_struct del_work; ++ spinlock_t lock; ++ wait_queue_head_t sb_wait; ++ atomic_t pending_writes; ++ unsigned int safemode; ++ unsigned int safemode_delay; ++ struct timer_list safemode_timer; ++ struct percpu_ref writes_pending; ++ int sync_checkers; ++ struct request_queue *queue; ++ struct bitmap *bitmap; ++ struct { ++ struct file *file; ++ loff_t offset; ++ long unsigned int space; ++ loff_t default_offset; ++ long unsigned int default_space; ++ struct mutex mutex; ++ long unsigned int chunksize; ++ long unsigned int daemon_sleep; ++ long unsigned int max_write_behind; ++ int external; ++ int nodes; ++ char cluster_name[64]; ++ } bitmap_info; ++ atomic_t max_corr_read_errors; ++ struct list_head all_mddevs; ++ struct attribute_group *to_remove; ++ struct bio_set bio_set; ++ struct bio_set sync_set; ++ struct bio *flush_bio; ++ atomic_t flush_pending; ++ ktime_t start_flush; ++ ktime_t last_flush; ++ struct work_struct flush_work; ++ struct work_struct event_work; ++ void (*sync_super)(struct mddev *, struct md_rdev *); ++ struct md_cluster_info *cluster_info; ++ unsigned int good_device_nr; ++ bool has_superblocks: 1; ++}; ++ ++struct md_rdev { ++ struct list_head same_set; ++ sector_t sectors; ++ struct mddev *mddev; ++ int last_events; ++ struct block_device *meta_bdev; ++ struct block_device *bdev; ++ struct page *sb_page; ++ struct page *bb_page; ++ int sb_loaded; ++ __u64 sb_events; ++ sector_t data_offset; ++ sector_t new_data_offset; ++ sector_t sb_start; ++ int sb_size; ++ int preferred_minor; ++ struct kobject kobj; ++ long unsigned int flags; ++ wait_queue_head_t blocked_wait; ++ int desc_nr; ++ int raid_disk; ++ int new_raid_disk; ++ int 
saved_raid_disk; ++ union { ++ sector_t recovery_offset; ++ sector_t journal_tail; ++ }; ++ atomic_t nr_pending; ++ atomic_t read_errors; ++ time64_t last_read_error; ++ atomic_t corrected_errors; ++ struct work_struct del_work; ++ struct kernfs_node *sysfs_state; ++ struct badblocks badblocks; ++ struct { ++ short int offset; ++ unsigned int size; ++ sector_t sector; ++ } ppl; ++}; ++ ++enum flag_bits { ++ Faulty = 0, ++ In_sync = 1, ++ Bitmap_sync = 2, ++ WriteMostly = 3, ++ AutoDetected = 4, ++ Blocked = 5, ++ WriteErrorSeen = 6, ++ FaultRecorded = 7, ++ BlockedBadBlocks = 8, ++ WantReplacement = 9, ++ Replacement = 10, ++ Candidate = 11, ++ Journal = 12, ++ ClusterRemove = 13, ++ RemoveSynchronized = 14, ++ ExternalBbl = 15, ++ FailFast = 16, ++ LastDev = 17, ++ WantRemove = 18, ++}; ++ ++enum mddev_flags { ++ MD_ARRAY_FIRST_USE = 0, ++ MD_CLOSING = 1, ++ MD_JOURNAL_CLEAN = 2, ++ MD_HAS_JOURNAL = 3, ++ MD_CLUSTER_RESYNC_LOCKED = 4, ++ MD_FAILFAST_SUPPORTED = 5, ++ MD_HAS_PPL = 6, ++ MD_HAS_MULTIPLE_PPLS = 7, ++ MD_ALLOW_SB_UPDATE = 8, ++ MD_UPDATING_SB = 9, ++ MD_NOT_READY = 10, ++}; ++ ++enum mddev_sb_flags { ++ MD_SB_CHANGE_DEVS = 0, ++ MD_SB_CHANGE_CLEAN = 1, ++ MD_SB_CHANGE_PENDING = 2, ++ MD_SB_NEED_REWRITE = 3, ++}; ++ ++struct md_personality { ++ char *name; ++ int level; ++ struct list_head list; ++ struct module *owner; ++ bool (*make_request)(struct mddev *, struct bio *); ++ int (*run)(struct mddev *); ++ int (*start)(struct mddev *); ++ void (*free)(struct mddev *, void *); ++ void (*status)(struct seq_file *, struct mddev *); ++ void (*error_handler)(struct mddev *, struct md_rdev *); ++ int (*hot_add_disk)(struct mddev *, struct md_rdev *); ++ int (*hot_remove_disk)(struct mddev *, struct md_rdev *); ++ int (*spare_active)(struct mddev *); ++ sector_t (*sync_request)(struct mddev *, sector_t, int *); ++ int (*resize)(struct mddev *, sector_t); ++ sector_t (*size)(struct mddev *, sector_t, int); ++ int (*check_reshape)(struct mddev *); ++ int (*start_reshape)(struct mddev *); ++ void (*finish_reshape)(struct mddev *); ++ void (*quiesce)(struct mddev *, int); ++ void * (*takeover)(struct mddev *); ++ int (*congested)(struct mddev *, int); ++ int (*change_consistency_policy)(struct mddev *, const char *); ++}; ++ ++struct md_thread { ++ void (*run)(struct md_thread *); ++ struct mddev *mddev; ++ wait_queue_head_t wqueue; ++ long unsigned int flags; ++ struct task_struct *tsk; ++ long unsigned int timeout; ++ void *private; ++}; ++ ++struct bitmap_page; ++ ++struct bitmap_counts { ++ spinlock_t lock; ++ struct bitmap_page *bp; ++ long unsigned int pages; ++ long unsigned int missing_pages; ++ long unsigned int chunkshift; ++ long unsigned int chunks; ++}; ++ ++struct bitmap_storage { ++ struct file *file; ++ struct page *sb_page; ++ struct page **filemap; ++ long unsigned int *filemap_attr; ++ long unsigned int file_pages; ++ long unsigned int bytes; ++}; ++ ++struct bitmap { ++ struct bitmap_counts counts; ++ struct mddev *mddev; ++ __u64 events_cleared; ++ int need_sync; ++ struct bitmap_storage storage; ++ long unsigned int flags; ++ int allclean; ++ atomic_t behind_writes; ++ long unsigned int behind_writes_used; ++ long unsigned int daemon_lastrun; ++ long unsigned int last_end_sync; ++ atomic_t pending_writes; ++ wait_queue_head_t write_wait; ++ wait_queue_head_t overflow_wait; ++ wait_queue_head_t behind_wait; ++ struct kernfs_node *sysfs_can_clear; ++ int cluster_slot; ++}; ++ ++enum recovery_flags { ++ MD_RECOVERY_RUNNING = 0, ++ MD_RECOVERY_SYNC = 1, ++ 
MD_RECOVERY_RECOVER = 2, ++ MD_RECOVERY_INTR = 3, ++ MD_RECOVERY_DONE = 4, ++ MD_RECOVERY_NEEDED = 5, ++ MD_RECOVERY_REQUESTED = 6, ++ MD_RECOVERY_CHECK = 7, ++ MD_RECOVERY_RESHAPE = 8, ++ MD_RECOVERY_FROZEN = 9, ++ MD_RECOVERY_ERROR = 10, ++ MD_RECOVERY_WAIT = 11, ++ MD_RESYNCING_REMOTE = 12, ++}; ++ ++struct md_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct mddev *, char *); ++ ssize_t (*store)(struct mddev *, const char *, size_t); ++}; ++ ++struct bitmap_page { ++ char *map; ++ unsigned int hijacked: 1; ++ unsigned int pending: 1; ++ unsigned int count: 30; ++}; ++ ++struct super_type { ++ char *name; ++ struct module *owner; ++ int (*load_super)(struct md_rdev *, struct md_rdev *, int); ++ int (*validate_super)(struct mddev *, struct md_rdev *); ++ void (*sync_super)(struct mddev *, struct md_rdev *); ++ long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); ++ int (*allow_new_offset)(struct md_rdev *, long long unsigned int); ++}; ++ ++struct rdev_sysfs_entry { ++ struct attribute attr; ++ ssize_t (*show)(struct md_rdev *, char *); ++ ssize_t (*store)(struct md_rdev *, const char *, size_t); ++}; ++ ++enum array_state { ++ clear = 0, ++ inactive = 1, ++ suspended = 2, ++ readonly = 3, ++ read_auto = 4, ++ clean = 5, ++ active = 6, ++ write_pending = 7, ++ active_idle = 8, ++ bad_word = 9, ++}; ++ ++struct detected_devices_node { ++ struct list_head list; ++ dev_t dev; ++}; ++ ++typedef __u16 bitmap_counter_t; ++ ++enum bitmap_state { ++ BITMAP_STALE = 1, ++ BITMAP_WRITE_ERROR = 2, ++ BITMAP_HOSTENDIAN = 15, ++}; ++ ++struct bitmap_super_s { ++ __le32 magic; ++ __le32 version; ++ __u8 uuid[16]; ++ __le64 events; ++ __le64 events_cleared; ++ __le64 sync_size; ++ __le32 state; ++ __le32 chunksize; ++ __le32 daemon_sleep; ++ __le32 write_behind; ++ __le32 sectors_reserved; ++ __le32 nodes; ++ __u8 cluster_name[64]; ++ __u8 pad[120]; ++}; ++ ++typedef struct bitmap_super_s bitmap_super_t; ++ ++enum bitmap_page_attr { ++ BITMAP_PAGE_DIRTY = 0, ++ BITMAP_PAGE_PENDING = 1, ++ BITMAP_PAGE_NEEDWRITE = 2, ++}; ++ ++struct dm_kobject_holder { ++ struct kobject kobj; ++ struct completion completion; ++}; ++ ++enum { ++ EDAC_REPORTING_ENABLED = 0, ++ EDAC_REPORTING_DISABLED = 1, ++ EDAC_REPORTING_FORCE = 2, ++}; ++ ++enum dev_type { ++ DEV_UNKNOWN = 0, ++ DEV_X1 = 1, ++ DEV_X2 = 2, ++ DEV_X4 = 3, ++ DEV_X8 = 4, ++ DEV_X16 = 5, ++ DEV_X32 = 6, ++ DEV_X64 = 7, ++}; ++ ++enum hw_event_mc_err_type { ++ HW_EVENT_ERR_CORRECTED = 0, ++ HW_EVENT_ERR_UNCORRECTED = 1, ++ HW_EVENT_ERR_DEFERRED = 2, ++ HW_EVENT_ERR_FATAL = 3, ++ HW_EVENT_ERR_INFO = 4, ++}; ++ ++enum mem_type { ++ MEM_EMPTY = 0, ++ MEM_RESERVED = 1, ++ MEM_UNKNOWN = 2, ++ MEM_FPM = 3, ++ MEM_EDO = 4, ++ MEM_BEDO = 5, ++ MEM_SDR = 6, ++ MEM_RDR = 7, ++ MEM_DDR = 8, ++ MEM_RDDR = 9, ++ MEM_RMBS = 10, ++ MEM_DDR2 = 11, ++ MEM_FB_DDR2 = 12, ++ MEM_RDDR2 = 13, ++ MEM_XDR = 14, ++ MEM_DDR3 = 15, ++ MEM_RDDR3 = 16, ++ MEM_LRDDR3 = 17, ++ MEM_DDR4 = 18, ++ MEM_RDDR4 = 19, ++ MEM_LRDDR4 = 20, ++ MEM_NVDIMM = 21, ++}; ++ ++enum edac_type { ++ EDAC_UNKNOWN = 0, ++ EDAC_NONE = 1, ++ EDAC_RESERVED = 2, ++ EDAC_PARITY = 3, ++ EDAC_EC = 4, ++ EDAC_SECDED = 5, ++ EDAC_S2ECD2ED = 6, ++ EDAC_S4ECD4ED = 7, ++ EDAC_S8ECD8ED = 8, ++ EDAC_S16ECD16ED = 9, ++}; ++ ++enum scrub_type { ++ SCRUB_UNKNOWN = 0, ++ SCRUB_NONE = 1, ++ SCRUB_SW_PROG = 2, ++ SCRUB_SW_SRC = 3, ++ SCRUB_SW_PROG_SRC = 4, ++ SCRUB_SW_TUNABLE = 5, ++ SCRUB_HW_PROG = 6, ++ SCRUB_HW_SRC = 7, ++ SCRUB_HW_PROG_SRC = 8, ++ SCRUB_HW_TUNABLE = 9, ++}; ++ ++enum 
edac_mc_layer_type { ++ EDAC_MC_LAYER_BRANCH = 0, ++ EDAC_MC_LAYER_CHANNEL = 1, ++ EDAC_MC_LAYER_SLOT = 2, ++ EDAC_MC_LAYER_CHIP_SELECT = 3, ++ EDAC_MC_LAYER_ALL_MEM = 4, ++}; ++ ++struct edac_mc_layer { ++ enum edac_mc_layer_type type; ++ unsigned int size; ++ bool is_virt_csrow; ++}; ++ ++struct mem_ctl_info; ++ ++struct dimm_info { ++ struct device dev; ++ char label[32]; ++ unsigned int location[3]; ++ struct mem_ctl_info *mci; ++ u32 grain; ++ enum dev_type dtype; ++ enum mem_type mtype; ++ enum edac_type edac_mode; ++ u32 nr_pages; ++ unsigned int csrow; ++ unsigned int cschannel; ++}; ++ ++struct mcidev_sysfs_attribute; ++ ++struct edac_raw_error_desc { ++ char location[256]; ++ char label[296]; ++ long int grain; ++ u16 error_count; ++ int top_layer; ++ int mid_layer; ++ int low_layer; ++ long unsigned int page_frame_number; ++ long unsigned int offset_in_page; ++ long unsigned int syndrome; ++ const char *msg; ++ const char *other_detail; ++ bool enable_per_layer_report; ++}; ++ ++struct csrow_info; ++ ++struct mem_ctl_info { ++ struct device dev; ++ struct bus_type *bus; ++ struct list_head link; ++ struct module *owner; ++ long unsigned int mtype_cap; ++ long unsigned int edac_ctl_cap; ++ long unsigned int edac_cap; ++ long unsigned int scrub_cap; ++ enum scrub_type scrub_mode; ++ int (*set_sdram_scrub_rate)(struct mem_ctl_info *, u32); ++ int (*get_sdram_scrub_rate)(struct mem_ctl_info *); ++ void (*edac_check)(struct mem_ctl_info *); ++ long unsigned int (*ctl_page_to_phys)(struct mem_ctl_info *, long unsigned int); ++ int mc_idx; ++ struct csrow_info **csrows; ++ unsigned int nr_csrows; ++ unsigned int num_cschannel; ++ unsigned int n_layers; ++ struct edac_mc_layer *layers; ++ bool csbased; ++ unsigned int tot_dimms; ++ struct dimm_info **dimms; ++ struct device *pdev; ++ const char *mod_name; ++ const char *ctl_name; ++ const char *dev_name; ++ void *pvt_info; ++ long unsigned int start_time; ++ u32 ce_noinfo_count; ++ u32 ue_noinfo_count; ++ u32 ue_mc; ++ u32 ce_mc; ++ u32 *ce_per_layer[3]; ++ u32 *ue_per_layer[3]; ++ struct completion complete; ++ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; ++ struct delayed_work work; ++ struct edac_raw_error_desc error_desc; ++ int op_state; ++ struct dentry *debugfs; ++ u8 fake_inject_layer[3]; ++ bool fake_inject_ue; ++ u16 fake_inject_count; ++}; ++ ++struct rank_info { ++ int chan_idx; ++ struct csrow_info *csrow; ++ struct dimm_info *dimm; ++ u32 ce_count; ++}; ++ ++struct csrow_info { ++ struct device dev; ++ long unsigned int first_page; ++ long unsigned int last_page; ++ long unsigned int page_mask; ++ int csrow_idx; ++ u32 ue_count; ++ u32 ce_count; ++ struct mem_ctl_info *mci; ++ u32 nr_channels; ++ struct rank_info **channels; ++}; ++ ++struct edac_device_counter { ++ u32 ue_count; ++ u32 ce_count; ++}; ++ ++struct edac_device_ctl_info; ++ ++struct edac_dev_sysfs_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct edac_device_ctl_info *, char *); ++ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); ++}; ++ ++struct edac_device_instance; ++ ++struct edac_device_ctl_info { ++ struct list_head link; ++ struct module *owner; ++ int dev_idx; ++ int log_ue; ++ int log_ce; ++ int panic_on_ue; ++ unsigned int poll_msec; ++ long unsigned int delay; ++ struct edac_dev_sysfs_attribute *sysfs_attributes; ++ struct bus_type *edac_subsys; ++ int op_state; ++ struct delayed_work work; ++ void (*edac_check)(struct edac_device_ctl_info *); ++ struct device *dev; ++ const char *mod_name; ++ 
const char *ctl_name; ++ const char *dev_name; ++ void *pvt_info; ++ long unsigned int start_time; ++ struct completion removal_complete; ++ char name[32]; ++ u32 nr_instances; ++ struct edac_device_instance *instances; ++ struct edac_device_counter counters; ++ struct kobject kobj; ++}; ++ ++struct edac_device_block; ++ ++struct edac_dev_sysfs_block_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct kobject *, struct attribute *, char *); ++ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); ++ struct edac_device_block *block; ++ unsigned int value; ++}; ++ ++struct edac_device_block { ++ struct edac_device_instance *instance; ++ char name[32]; ++ struct edac_device_counter counters; ++ int nr_attribs; ++ struct edac_dev_sysfs_block_attribute *block_attributes; ++ struct kobject kobj; ++}; ++ ++struct edac_device_instance { ++ struct edac_device_ctl_info *ctl; ++ char name[35]; ++ struct edac_device_counter counters; ++ u32 nr_blocks; ++ struct edac_device_block *blocks; ++ struct kobject kobj; ++}; ++ ++struct dev_ch_attribute { ++ struct device_attribute attr; ++ int channel; ++}; ++ ++struct ctl_info_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct edac_device_ctl_info *, char *); ++ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); ++}; ++ ++struct instance_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct edac_device_instance *, char *); ++ ssize_t (*store)(struct edac_device_instance *, const char *, size_t); ++}; ++ ++struct edac_pci_counter { ++ atomic_t pe_count; ++ atomic_t npe_count; ++}; ++ ++struct edac_pci_ctl_info { ++ struct list_head link; ++ int pci_idx; ++ struct bus_type *edac_subsys; ++ int op_state; ++ struct delayed_work work; ++ void (*edac_check)(struct edac_pci_ctl_info *); ++ struct device *dev; ++ const char *mod_name; ++ const char *ctl_name; ++ const char *dev_name; ++ void *pvt_info; ++ long unsigned int start_time; ++ struct completion complete; ++ char name[32]; ++ struct edac_pci_counter counters; ++ struct kobject kobj; ++}; ++ ++struct edac_pci_gen_data { ++ int edac_idx; ++}; ++ ++struct instance_attribute___2 { ++ struct attribute attr; ++ ssize_t (*show)(struct edac_pci_ctl_info *, char *); ++ ssize_t (*store)(struct edac_pci_ctl_info *, const char *, size_t); ++}; ++ ++struct edac_pci_dev_attribute { ++ struct attribute attr; ++ void *value; ++ ssize_t (*show)(void *, char *); ++ ssize_t (*store)(void *, const char *, size_t); ++}; ++ ++typedef void (*pci_parity_check_fn_t)(struct pci_dev *); ++ ++enum dmi_entry_type { ++ DMI_ENTRY_BIOS = 0, ++ DMI_ENTRY_SYSTEM = 1, ++ DMI_ENTRY_BASEBOARD = 2, ++ DMI_ENTRY_CHASSIS = 3, ++ DMI_ENTRY_PROCESSOR = 4, ++ DMI_ENTRY_MEM_CONTROLLER = 5, ++ DMI_ENTRY_MEM_MODULE = 6, ++ DMI_ENTRY_CACHE = 7, ++ DMI_ENTRY_PORT_CONNECTOR = 8, ++ DMI_ENTRY_SYSTEM_SLOT = 9, ++ DMI_ENTRY_ONBOARD_DEVICE = 10, ++ DMI_ENTRY_OEMSTRINGS = 11, ++ DMI_ENTRY_SYSCONF = 12, ++ DMI_ENTRY_BIOS_LANG = 13, ++ DMI_ENTRY_GROUP_ASSOC = 14, ++ DMI_ENTRY_SYSTEM_EVENT_LOG = 15, ++ DMI_ENTRY_PHYS_MEM_ARRAY = 16, ++ DMI_ENTRY_MEM_DEVICE = 17, ++ DMI_ENTRY_32_MEM_ERROR = 18, ++ DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19, ++ DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, ++ DMI_ENTRY_BUILTIN_POINTING_DEV = 21, ++ DMI_ENTRY_PORTABLE_BATTERY = 22, ++ DMI_ENTRY_SYSTEM_RESET = 23, ++ DMI_ENTRY_HW_SECURITY = 24, ++ DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, ++ DMI_ENTRY_VOLTAGE_PROBE = 26, ++ DMI_ENTRY_COOLING_DEV = 27, ++ DMI_ENTRY_TEMP_PROBE = 28, ++ DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, 
++ DMI_ENTRY_OOB_REMOTE_ACCESS = 30, ++ DMI_ENTRY_BIS_ENTRY = 31, ++ DMI_ENTRY_SYSTEM_BOOT = 32, ++ DMI_ENTRY_MGMT_DEV = 33, ++ DMI_ENTRY_MGMT_DEV_COMPONENT = 34, ++ DMI_ENTRY_MGMT_DEV_THRES = 35, ++ DMI_ENTRY_MEM_CHANNEL = 36, ++ DMI_ENTRY_IPMI_DEV = 37, ++ DMI_ENTRY_SYS_POWER_SUPPLY = 38, ++ DMI_ENTRY_ADDITIONAL = 39, ++ DMI_ENTRY_ONBOARD_DEV_EXT = 40, ++ DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, ++ DMI_ENTRY_INACTIVE = 126, ++ DMI_ENTRY_END_OF_TABLE = 127, ++}; ++ ++struct ghes_edac_pvt { ++ struct list_head list; ++ struct ghes *ghes; ++ struct mem_ctl_info *mci; ++ char detail_location[240]; ++ char other_detail[160]; ++ char msg[80]; ++}; ++ ++struct memdev_dmi_entry { ++ u8 type; ++ u8 length; ++ u16 handle; ++ u16 phys_mem_array_handle; ++ u16 mem_err_info_handle; ++ u16 total_width; ++ u16 data_width; ++ u16 size; ++ u8 form_factor; ++ u8 device_set; ++ u8 device_locator; ++ u8 bank_locator; ++ u8 memory_type; ++ u16 type_detail; ++ u16 speed; ++ u8 manufacturer; ++ u8 serial_number; ++ u8 asset_tag; ++ u8 part_number; ++ u8 attributes; ++ u32 extended_size; ++ u16 conf_mem_clk_speed; ++} __attribute__((packed)); ++ ++struct ghes_edac_dimm_fill { ++ struct mem_ctl_info *mci; ++ unsigned int count; ++}; ++ ++struct cpufreq_freqs { ++ unsigned int cpu; ++ unsigned int old; ++ unsigned int new; ++ u8 flags; ++}; ++ ++struct cpufreq_driver { ++ char name[16]; ++ u8 flags; ++ void *driver_data; ++ int (*init)(struct cpufreq_policy *); ++ int (*verify)(struct cpufreq_policy *); ++ int (*setpolicy)(struct cpufreq_policy *); ++ int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); ++ int (*target_index)(struct cpufreq_policy *, unsigned int); ++ unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); ++ unsigned int (*resolve_freq)(struct cpufreq_policy *, unsigned int); ++ unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); ++ int (*target_intermediate)(struct cpufreq_policy *, unsigned int); ++ unsigned int (*get)(unsigned int); ++ int (*bios_limit)(int, unsigned int *); ++ int (*exit)(struct cpufreq_policy *); ++ void (*stop_cpu)(struct cpufreq_policy *); ++ int (*suspend)(struct cpufreq_policy *); ++ int (*resume)(struct cpufreq_policy *); ++ void (*ready)(struct cpufreq_policy *); ++ struct freq_attr **attr; ++ bool boost_enabled; ++ int (*set_boost)(int); ++}; ++ ++struct cpufreq_stats { ++ unsigned int total_trans; ++ long long unsigned int last_time; ++ unsigned int max_state; ++ unsigned int state_num; ++ unsigned int last_index; ++ u64 *time_in_state; ++ unsigned int *freq_table; ++ unsigned int *trans_table; ++}; ++ ++struct gov_attr_set { ++ struct kobject kobj; ++ struct list_head policy_list; ++ struct mutex update_lock; ++ int usage_count; ++}; ++ ++struct governor_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct gov_attr_set *, char *); ++ ssize_t (*store)(struct gov_attr_set *, const char *, size_t); ++}; ++ ++enum { ++ OD_NORMAL_SAMPLE = 0, ++ OD_SUB_SAMPLE = 1, ++}; ++ ++struct dbs_data { ++ struct gov_attr_set attr_set; ++ void *tuners; ++ unsigned int ignore_nice_load; ++ unsigned int sampling_rate; ++ unsigned int sampling_down_factor; ++ unsigned int up_threshold; ++ unsigned int io_is_busy; ++}; ++ ++struct policy_dbs_info { ++ struct cpufreq_policy *policy; ++ struct mutex update_mutex; ++ u64 last_sample_time; ++ s64 sample_delay_ns; ++ atomic_t work_count; ++ struct irq_work irq_work; ++ struct work_struct work; ++ struct dbs_data *dbs_data; ++ struct list_head list; ++ unsigned int rate_mult; ++ unsigned int 
idle_periods; ++ bool is_shared; ++ bool work_in_progress; ++}; ++ ++struct dbs_governor { ++ struct cpufreq_governor gov; ++ struct kobj_type kobj_type; ++ struct dbs_data *gdbs_data; ++ unsigned int (*gov_dbs_update)(struct cpufreq_policy *); ++ struct policy_dbs_info * (*alloc)(); ++ void (*free)(struct policy_dbs_info *); ++ int (*init)(struct dbs_data *); ++ void (*exit)(struct dbs_data *); ++ void (*start)(struct cpufreq_policy *); ++}; ++ ++struct od_ops { ++ unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, unsigned int); ++}; ++ ++struct od_policy_dbs_info { ++ struct policy_dbs_info policy_dbs; ++ unsigned int freq_lo; ++ unsigned int freq_lo_delay_us; ++ unsigned int freq_hi_delay_us; ++ unsigned int sample_type: 1; ++}; ++ ++struct od_dbs_tuners { ++ unsigned int powersave_bias; ++}; ++ ++struct cs_policy_dbs_info { ++ struct policy_dbs_info policy_dbs; ++ unsigned int down_skip; ++ unsigned int requested_freq; ++}; ++ ++struct cs_dbs_tuners { ++ unsigned int down_threshold; ++ unsigned int freq_step; ++}; ++ ++struct cpu_dbs_info { ++ u64 prev_cpu_idle; ++ u64 prev_update_time; ++ u64 prev_cpu_nice; ++ unsigned int prev_load; ++ struct update_util_data update_util; ++ struct policy_dbs_info *policy_dbs; ++}; ++ ++struct cppc_workaround_oem_info { ++ char oem_id[7]; ++ char oem_table_id[9]; ++ u32 oem_revision; ++}; ++ ++struct cpuidle_governor { ++ char name[16]; ++ struct list_head governor_list; ++ unsigned int rating; ++ int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); ++ void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); ++ int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); ++ void (*reflect)(struct cpuidle_device *, int); ++}; ++ ++struct cpuidle_state_kobj { ++ struct cpuidle_state *state; ++ struct cpuidle_state_usage *state_usage; ++ struct completion kobj_unregister; ++ struct kobject kobj; ++}; ++ ++struct cpuidle_device_kobj { ++ struct cpuidle_device *dev; ++ struct completion kobj_unregister; ++ struct kobject kobj; ++}; ++ ++struct cpuidle_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct cpuidle_device *, char *); ++ ssize_t (*store)(struct cpuidle_device *, const char *, size_t); ++}; ++ ++struct cpuidle_state_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); ++ ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); ++}; ++ ++struct menu_device { ++ int last_state_idx; ++ int needs_update; ++ int tick_wakeup; ++ unsigned int next_timer_us; ++ unsigned int predicted_us; ++ unsigned int bucket; ++ unsigned int correction_factor[12]; ++ unsigned int intervals[8]; ++ int interval_ptr; ++}; ++ ++struct pci_dev___2; ++ ++struct sdhci_pci_data { ++ struct pci_dev___2 *pdev; ++ int slotno; ++ int rst_n_gpio; ++ int cd_gpio; ++ int (*setup)(struct sdhci_pci_data *); ++ void (*cleanup)(struct sdhci_pci_data *); ++}; ++ ++struct led_cdev; ++ ++typedef long unsigned int psci_fn(long unsigned int, long unsigned int, long unsigned int, long unsigned int); ++ ++enum psci_function { ++ PSCI_FN_CPU_SUSPEND = 0, ++ PSCI_FN_CPU_ON = 1, ++ PSCI_FN_CPU_OFF = 2, ++ PSCI_FN_MIGRATE = 3, ++ PSCI_FN_MAX = 4, ++}; ++ ++typedef int (*psci_initcall_t)(const struct device_node *); ++ ++struct sdei_event { ++ struct list_head list; ++ bool reregister; ++ bool reenable; ++ u32 event_num; ++ u8 type; ++ u8 priority; ++ union { ++ struct sdei_registered_event *registered; ++ struct sdei_registered_event 
*private_registered; ++ }; ++}; ++ ++struct sdei_crosscall_args { ++ struct sdei_event *event; ++ atomic_t errors; ++ int first_error; ++}; ++ ++struct dmi_memdev_info { ++ const char *device; ++ const char *bank; ++ u64 size; ++ u16 handle; ++}; ++ ++struct dmi_sysfs_entry { ++ struct dmi_header dh; ++ struct kobject kobj; ++ int instance; ++ int position; ++ struct list_head list; ++ struct kobject *child; ++}; ++ ++struct dmi_sysfs_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct dmi_sysfs_entry *, char *); ++}; ++ ++struct dmi_sysfs_mapped_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct dmi_sysfs_entry *, const struct dmi_header *, char *); ++}; ++ ++typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *, const struct dmi_header *, void *); ++ ++struct find_dmi_data { ++ struct dmi_sysfs_entry *entry; ++ dmi_callback callback; ++ void *private; ++ int instance_countdown; ++ ssize_t ret; ++}; ++ ++struct dmi_read_state { ++ char *buf; ++ loff_t pos; ++ size_t count; ++}; ++ ++struct dmi_entry_attr_show_data { ++ struct attribute *attr; ++ char *buf; ++}; ++ ++struct dmi_system_event_log { ++ struct dmi_header header; ++ u16 area_length; ++ u16 header_start_offset; ++ u16 data_start_offset; ++ u8 access_method; ++ u8 status; ++ u32 change_token; ++ union { ++ struct { ++ u16 index_addr; ++ u16 data_addr; ++ } io; ++ u32 phys_addr32; ++ u16 gpnv_handle; ++ u32 access_method_address; ++ }; ++ u8 header_format; ++ u8 type_descriptors_supported_count; ++ u8 per_log_type_descriptor_length; ++ u8 supported_log_type_descriptos[0]; ++} __attribute__((packed)); ++ ++typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *, loff_t); ++ ++struct dmi_device_attribute { ++ struct device_attribute dev_attr; ++ int field; ++}; ++ ++struct mafield { ++ const char *prefix; ++ int field; ++}; ++ ++struct fw_cfg_file { ++ __be32 size; ++ __be16 select; ++ __u16 reserved; ++ char name[56]; ++}; ++ ++struct fw_cfg_dma_access { ++ __be32 control; ++ __be32 length; ++ __be64 address; ++}; ++ ++struct fw_cfg_vmcoreinfo { ++ __le16 host_format; ++ __le16 guest_format; ++ __le32 size; ++ __le64 paddr; ++}; ++ ++struct fw_cfg_sysfs_entry { ++ struct kobject kobj; ++ u32 size; ++ u16 select; ++ char name[56]; ++ struct list_head list; ++}; ++ ++struct fw_cfg_sysfs_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct fw_cfg_sysfs_entry *, char *); ++}; ++ ++typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool); ++ ++typedef struct { ++ efi_guid_t guid; ++ u64 table; ++} efi_config_table_64_t; ++ ++typedef struct { ++ efi_guid_t guid; ++ u32 table; ++} efi_config_table_32_t; ++ ++typedef struct { ++ efi_guid_t guid; ++ const char *name; ++ long unsigned int *ptr; ++} efi_config_table_type_t; ++ ++struct efi_fdt_params { ++ u64 system_table; ++ u64 mmap; ++ u32 mmap_size; ++ u32 desc_size; ++ u32 desc_ver; ++}; ++ ++typedef struct { ++ u32 version; ++ u32 length; ++ u64 memory_protection_attribute; ++} efi_properties_table_t; ++ ++struct efivar_operations { ++ efi_get_variable_t *get_variable; ++ efi_get_next_variable_t *get_next_variable; ++ efi_set_variable_t *set_variable; ++ efi_set_variable_t *set_variable_nonblocking; ++ efi_query_variable_store_t *query_variable_store; ++}; ++ ++struct efivars { ++ struct kset *kset; ++ struct kobject *kobject; ++ const struct efivar_operations *ops; ++}; ++ ++struct params { ++ const char name[32]; ++ const char propname[32]; ++ int offset; ++ int size; ++}; ++ ++struct param_info { ++ int found; ++ 
void *params; ++ const char *missing; ++}; ++ ++struct efi_generic_dev_path { ++ u8 type; ++ u8 sub_type; ++ u16 length; ++}; ++ ++struct variable_validate { ++ efi_guid_t vendor; ++ char *name; ++ bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int); ++}; ++ ++typedef struct { ++ u32 version; ++ u32 num_entries; ++ u32 desc_size; ++ u32 reserved; ++ efi_memory_desc_t entry[0]; ++} efi_memory_attributes_table_t; ++ ++typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); ++ ++typedef struct { ++ u64 length; ++ u64 data; ++} efi_capsule_block_desc_t; ++ ++struct efi_memory_map_data { ++ phys_addr_t phys_map; ++ long unsigned int size; ++ long unsigned int desc_version; ++ long unsigned int desc_size; ++}; ++ ++struct efi_mem_range { ++ struct range range; ++ u64 attribute; ++}; ++ ++struct efi_system_resource_entry_v1 { ++ efi_guid_t fw_class; ++ u32 fw_type; ++ u32 fw_version; ++ u32 lowest_supported_fw_version; ++ u32 capsule_flags; ++ u32 last_attempt_version; ++ u32 last_attempt_status; ++}; ++ ++struct efi_system_resource_table { ++ u32 fw_resource_count; ++ u32 fw_resource_count_max; ++ u64 fw_resource_version; ++ u8 entries[0]; ++}; ++ ++struct esre_entry { ++ union { ++ struct efi_system_resource_entry_v1 *esre1; ++ } esre; ++ struct kobject kobj; ++ struct list_head list; ++}; ++ ++struct esre_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct esre_entry *, char *); ++ ssize_t (*store)(struct esre_entry *, const char *, size_t); ++}; ++ ++struct cper_sec_proc_generic { ++ __u64 validation_bits; ++ __u8 proc_type; ++ __u8 proc_isa; ++ __u8 proc_error_type; ++ __u8 operation; ++ __u8 flags; ++ __u8 level; ++ __u16 reserved; ++ __u64 cpu_version; ++ char cpu_brand[128]; ++ __u64 proc_id; ++ __u64 target_addr; ++ __u64 requestor_id; ++ __u64 responder_id; ++ __u64 ip; ++}; ++ ++struct cper_mem_err_compact { ++ __u64 validation_bits; ++ __u16 node; ++ __u16 card; ++ __u16 module; ++ __u16 bank; ++ __u16 device; ++ __u16 row; ++ __u16 column; ++ __u16 bit_pos; ++ __u64 requestor_id; ++ __u64 responder_id; ++ __u64 target_id; ++ __u16 rank; ++ __u16 mem_array_handle; ++ __u16 mem_dev_handle; ++} __attribute__((packed)); ++ ++enum efi_rts_ids { ++ GET_TIME = 0, ++ SET_TIME = 1, ++ GET_WAKEUP_TIME = 2, ++ SET_WAKEUP_TIME = 3, ++ GET_VARIABLE = 4, ++ GET_NEXT_VARIABLE = 5, ++ SET_VARIABLE = 6, ++ QUERY_VARIABLE_INFO = 7, ++ GET_NEXT_HIGH_MONO_COUNT = 8, ++ UPDATE_CAPSULE = 9, ++ QUERY_CAPSULE_CAPS = 10, ++}; ++ ++struct efi_runtime_work { ++ void *arg1; ++ void *arg2; ++ void *arg3; ++ void *arg4; ++ void *arg5; ++ efi_status_t status; ++ struct work_struct work; ++ enum efi_rts_ids efi_rts_id; ++ struct completion efi_rts_comp; ++}; ++ ++struct cper_arm_ctx_info { ++ __u16 version; ++ __u16 type; ++ __u32 size; ++}; ++ ++struct of_timer_irq { ++ int irq; ++ int index; ++ int percpu; ++ const char *name; ++ long unsigned int flags; ++ irq_handler_t handler; ++}; ++ ++struct of_timer_base { ++ void *base; ++ const char *name; ++ int index; ++}; ++ ++struct of_timer_clk { ++ struct clk *clk; ++ const char *name; ++ int index; ++ long unsigned int rate; ++ long unsigned int period; ++}; ++ ++struct timer_of { ++ unsigned int flags; ++ struct device_node *np; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct clock_event_device clkevt; ++ struct of_timer_base of_base; ++ struct of_timer_irq of_irq; ++ struct of_timer_clk of_clk; ++ void *private_data; ++ long: 64; ++ long: 64; ++}; ++ ++typedef int 
(*of_init_fn_1_ret)(struct device_node *); ++ ++struct clocksource_mmio { ++ void *reg; ++ struct clocksource clksrc; ++}; ++ ++enum arch_timer_reg { ++ ARCH_TIMER_REG_CTRL = 0, ++ ARCH_TIMER_REG_TVAL = 1, ++}; ++ ++enum arch_timer_spi_nr { ++ ARCH_TIMER_PHYS_SPI = 0, ++ ARCH_TIMER_VIRT_SPI = 1, ++ ARCH_TIMER_MAX_TIMER_SPI = 2, ++}; ++ ++struct arch_timer { ++ void *base; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct clock_event_device evt; ++}; ++ ++struct ate_acpi_oem_info { ++ char oem_id[7]; ++ char oem_table_id[9]; ++ u32 oem_revision; ++}; ++ ++typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, const void *); ++ ++struct hid_device_id { ++ __u16 bus; ++ __u16 group; ++ __u32 vendor; ++ __u32 product; ++ kernel_ulong_t driver_data; ++}; ++ ++struct hid_item { ++ unsigned int format; ++ __u8 size; ++ __u8 type; ++ __u8 tag; ++ union { ++ __u8 u8; ++ __s8 s8; ++ __u16 u16; ++ __s16 s16; ++ __u32 u32; ++ __s32 s32; ++ __u8 *longdata; ++ } data; ++}; ++ ++struct hid_global { ++ unsigned int usage_page; ++ __s32 logical_minimum; ++ __s32 logical_maximum; ++ __s32 physical_minimum; ++ __s32 physical_maximum; ++ __s32 unit_exponent; ++ unsigned int unit; ++ unsigned int report_id; ++ unsigned int report_size; ++ unsigned int report_count; ++}; ++ ++struct hid_local { ++ unsigned int usage[12288]; ++ u8 usage_size[12288]; ++ unsigned int collection_index[12288]; ++ unsigned int usage_index; ++ unsigned int usage_minimum; ++ unsigned int delimiter_depth; ++ unsigned int delimiter_branch; ++}; ++ ++struct hid_collection { ++ unsigned int type; ++ unsigned int usage; ++ unsigned int level; ++}; ++ ++struct hid_usage { ++ unsigned int hid; ++ unsigned int collection_index; ++ unsigned int usage_index; ++ __u16 code; ++ __u8 type; ++ __s8 hat_min; ++ __s8 hat_max; ++ __s8 hat_dir; ++}; ++ ++struct hid_report; ++ ++struct hid_input; ++ ++struct hid_field { ++ unsigned int physical; ++ unsigned int logical; ++ unsigned int application; ++ struct hid_usage *usage; ++ unsigned int maxusage; ++ unsigned int flags; ++ unsigned int report_offset; ++ unsigned int report_size; ++ unsigned int report_count; ++ unsigned int report_type; ++ __s32 *value; ++ __s32 logical_minimum; ++ __s32 logical_maximum; ++ __s32 physical_minimum; ++ __s32 physical_maximum; ++ __s32 unit_exponent; ++ unsigned int unit; ++ struct hid_report *report; ++ unsigned int index; ++ struct hid_input *hidinput; ++ __u16 dpad; ++}; ++ ++struct hid_device; ++ ++struct hid_report { ++ struct list_head list; ++ struct list_head hidinput_list; ++ unsigned int id; ++ unsigned int type; ++ unsigned int application; ++ struct hid_field *field[256]; ++ unsigned int maxfield; ++ unsigned int size; ++ struct hid_device *device; ++}; ++ ++struct hid_input { ++ struct list_head list; ++ struct hid_report *report; ++ struct input_dev *input; ++ const char *name; ++ bool registered; ++ struct list_head reports; ++ unsigned int application; ++}; ++ ++enum hid_type { ++ HID_TYPE_OTHER = 0, ++ HID_TYPE_USBMOUSE = 1, ++ HID_TYPE_USBNONE = 2, ++}; ++ ++struct hid_report_enum { ++ unsigned int numbered; ++ struct list_head report_list; ++ struct hid_report *report_id_hash[256]; ++}; ++ ++enum hid_battery_status { ++ HID_BATTERY_UNKNOWN = 0, ++ HID_BATTERY_QUERIED = 1, ++ HID_BATTERY_REPORTED = 2, ++}; ++ ++struct hid_driver; ++ ++struct hid_ll_driver; ++ ++struct hid_device { ++ __u8 *dev_rdesc; ++ unsigned int dev_rsize; ++ __u8 *rdesc; ++ unsigned int rsize; ++ struct 
hid_collection *collection; ++ unsigned int collection_size; ++ unsigned int maxcollection; ++ unsigned int maxapplication; ++ __u16 bus; ++ __u16 group; ++ __u32 vendor; ++ __u32 product; ++ __u32 version; ++ enum hid_type type; ++ unsigned int country; ++ struct hid_report_enum report_enum[3]; ++ struct work_struct led_work; ++ struct semaphore driver_input_lock; ++ struct device dev; ++ struct hid_driver *driver; ++ struct hid_ll_driver *ll_driver; ++ struct mutex ll_open_lock; ++ unsigned int ll_open_count; ++ struct power_supply *battery; ++ __s32 battery_capacity; ++ __s32 battery_min; ++ __s32 battery_max; ++ __s32 battery_report_type; ++ __s32 battery_report_id; ++ enum hid_battery_status battery_status; ++ bool battery_avoid_query; ++ long unsigned int status; ++ unsigned int claimed; ++ unsigned int quirks; ++ bool io_started; ++ struct list_head inputs; ++ void *hiddev; ++ void *hidraw; ++ char name[128]; ++ char phys[64]; ++ char uniq[64]; ++ void *driver_data; ++ int (*ff_init)(struct hid_device *); ++ int (*hiddev_connect)(struct hid_device *, unsigned int); ++ void (*hiddev_disconnect)(struct hid_device *); ++ void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); ++ void (*hiddev_report_event)(struct hid_device *, struct hid_report *); ++ short unsigned int debug; ++ struct dentry *debug_dir; ++ struct dentry *debug_rdesc; ++ struct dentry *debug_events; ++ struct list_head debug_list; ++ spinlock_t debug_list_lock; ++ wait_queue_head_t debug_wait; ++}; ++ ++struct hid_report_id; ++ ++struct hid_usage_id; ++ ++struct hid_driver { ++ char *name; ++ const struct hid_device_id *id_table; ++ struct list_head dyn_list; ++ spinlock_t dyn_lock; ++ bool (*match)(struct hid_device *, bool); ++ int (*probe)(struct hid_device *, const struct hid_device_id *); ++ void (*remove)(struct hid_device *); ++ const struct hid_report_id *report_table; ++ int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); ++ const struct hid_usage_id *usage_table; ++ int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); ++ void (*report)(struct hid_device *, struct hid_report *); ++ __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); ++ int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); ++ int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); ++ int (*input_configured)(struct hid_device *, struct hid_input *); ++ void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); ++ int (*suspend)(struct hid_device *, pm_message_t); ++ int (*resume)(struct hid_device *); ++ int (*reset_resume)(struct hid_device *); ++ struct device_driver driver; ++}; ++ ++struct hid_ll_driver { ++ int (*start)(struct hid_device *); ++ void (*stop)(struct hid_device *); ++ int (*open)(struct hid_device *); ++ void (*close)(struct hid_device *); ++ int (*power)(struct hid_device *, int); ++ int (*parse)(struct hid_device *); ++ void (*request)(struct hid_device *, struct hid_report *, int); ++ int (*wait)(struct hid_device *); ++ int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); ++ int (*output_report)(struct hid_device *, __u8 *, size_t); ++ int (*idle)(struct hid_device *, int, int, int); ++}; ++ ++struct hid_parser { ++ struct hid_global global; ++ struct hid_global global_stack[4]; ++ unsigned int 
global_stack_ptr; ++ struct hid_local local; ++ unsigned int *collection_stack; ++ unsigned int collection_stack_ptr; ++ unsigned int collection_stack_size; ++ struct hid_device *device; ++ unsigned int scan_flags; ++}; ++ ++struct hid_report_id { ++ __u32 report_type; ++}; ++ ++struct hid_usage_id { ++ __u32 usage_hid; ++ __u32 usage_type; ++ __u32 usage_code; ++}; ++ ++struct hiddev { ++ int minor; ++ int exist; ++ int open; ++ struct mutex existancelock; ++ wait_queue_head_t wait; ++ struct hid_device *hid; ++ struct list_head list; ++ spinlock_t list_lock; ++ bool initialized; ++}; ++ ++struct hidraw { ++ unsigned int minor; ++ int exist; ++ int open; ++ wait_queue_head_t wait; ++ struct hid_device *hid; ++ struct device *dev; ++ spinlock_t list_lock; ++ struct list_head list; ++}; ++ ++struct hid_dynid { ++ struct list_head list; ++ struct hid_device_id id; ++}; ++ ++enum { ++ POWER_SUPPLY_SCOPE_UNKNOWN = 0, ++ POWER_SUPPLY_SCOPE_SYSTEM = 1, ++ POWER_SUPPLY_SCOPE_DEVICE = 2, ++}; ++ ++typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); ++ ++struct quirks_list_struct { ++ struct hid_device_id hid_bl_item; ++ struct list_head node; ++}; ++ ++struct hid_debug_list { ++ struct { ++ union { ++ struct __kfifo kfifo; ++ char *type; ++ const char *const_type; ++ char (*rectype)[0]; ++ char *ptr; ++ const char *ptr_const; ++ }; ++ char buf[0]; ++ } hid_debug_fifo; ++ struct fasync_struct *fasync; ++ struct hid_device *hdev; ++ struct list_head node; ++ struct mutex read_mutex; ++}; ++ ++struct hid_usage_entry { ++ unsigned int page; ++ unsigned int usage; ++ const char *description; ++}; ++ ++struct hidraw_devinfo { ++ __u32 bustype; ++ __s16 vendor; ++ __s16 product; ++}; ++ ++struct hidraw_report { ++ __u8 *value; ++ int len; ++}; ++ ++struct hidraw_list { ++ struct hidraw_report buffer[64]; ++ int head; ++ int tail; ++ struct fasync_struct *fasync; ++ struct hidraw *hidraw; ++ struct list_head node; ++ struct mutex read_mutex; ++}; ++ ++struct a4tech_sc { ++ long unsigned int quirks; ++ unsigned int hw_wheel; ++ __s32 delayed_value; ++}; ++ ++struct apple_sc { ++ long unsigned int quirks; ++ unsigned int fn_on; ++ long unsigned int pressed_numlock[12]; ++}; ++ ++struct apple_key_translation { ++ u16 from; ++ u16 to; ++ u8 flags; ++}; ++ ++struct lg_drv_data { ++ long unsigned int quirks; ++ void *device_props; ++}; ++ ++struct magicmouse_sc { ++ struct input_dev *input; ++ long unsigned int quirks; ++ int ntouches; ++ int scroll_accel; ++ long unsigned int scroll_jiffies; ++ struct { ++ short int x; ++ short int y; ++ short int scroll_x; ++ short int scroll_y; ++ u8 size; ++ } touches[16]; ++ int tracking_ids[16]; ++}; ++ ++struct ntrig_data { ++ __u16 x; ++ __u16 y; ++ __u16 w; ++ __u16 h; ++ __u16 id; ++ bool tipswitch; ++ bool confidence; ++ bool first_contact_touch; ++ bool reading_mt; ++ __u8 mt_footer[4]; ++ __u8 mt_foot_count; ++ __s8 act_state; ++ __s8 deactivate_slack; ++ __s8 activate_slack; ++ __u16 min_width; ++ __u16 min_height; ++ __u16 activation_width; ++ __u16 activation_height; ++ __u16 sensor_logical_width; ++ __u16 sensor_logical_height; ++ __u16 sensor_physical_width; ++ __u16 sensor_physical_height; ++}; ++ ++struct hid_control_fifo { ++ unsigned char dir; ++ struct hid_report *report; ++ char *raw_report; ++}; ++ ++struct hid_output_fifo { ++ struct hid_report *report; ++ char *raw_report; ++}; ++ ++struct hid_class_descriptor { ++ __u8 bDescriptorType; ++ __le16 wDescriptorLength; ++} __attribute__((packed)); ++ ++struct hid_descriptor { 
++ __u8 bLength; ++ __u8 bDescriptorType; ++ __le16 bcdHID; ++ __u8 bCountryCode; ++ __u8 bNumDescriptors; ++ struct hid_class_descriptor desc[1]; ++} __attribute__((packed)); ++ ++struct usbhid_device { ++ struct hid_device *hid; ++ struct usb_interface *intf; ++ int ifnum; ++ unsigned int bufsize; ++ struct urb *urbin; ++ char *inbuf; ++ dma_addr_t inbuf_dma; ++ struct urb *urbctrl; ++ struct usb_ctrlrequest *cr; ++ struct hid_control_fifo ctrl[256]; ++ unsigned char ctrlhead; ++ unsigned char ctrltail; ++ char *ctrlbuf; ++ dma_addr_t ctrlbuf_dma; ++ long unsigned int last_ctrl; ++ struct urb *urbout; ++ struct hid_output_fifo out[256]; ++ unsigned char outhead; ++ unsigned char outtail; ++ char *outbuf; ++ dma_addr_t outbuf_dma; ++ long unsigned int last_out; ++ spinlock_t lock; ++ long unsigned int iofl; ++ struct timer_list io_retry; ++ long unsigned int stop_retry; ++ unsigned int retry_delay; ++ struct work_struct reset_work; ++ wait_queue_head_t wait; ++}; ++ ++struct hiddev_event { ++ unsigned int hid; ++ int value; ++}; ++ ++struct hiddev_devinfo { ++ __u32 bustype; ++ __u32 busnum; ++ __u32 devnum; ++ __u32 ifnum; ++ __s16 vendor; ++ __s16 product; ++ __s16 version; ++ __u32 num_applications; ++}; ++ ++struct hiddev_collection_info { ++ __u32 index; ++ __u32 type; ++ __u32 usage; ++ __u32 level; ++}; ++ ++struct hiddev_report_info { ++ __u32 report_type; ++ __u32 report_id; ++ __u32 num_fields; ++}; ++ ++struct hiddev_field_info { ++ __u32 report_type; ++ __u32 report_id; ++ __u32 field_index; ++ __u32 maxusage; ++ __u32 flags; ++ __u32 physical; ++ __u32 logical; ++ __u32 application; ++ __s32 logical_minimum; ++ __s32 logical_maximum; ++ __s32 physical_minimum; ++ __s32 physical_maximum; ++ __u32 unit_exponent; ++ __u32 unit; ++}; ++ ++struct hiddev_usage_ref { ++ __u32 report_type; ++ __u32 report_id; ++ __u32 field_index; ++ __u32 usage_index; ++ __u32 usage_code; ++ __s32 value; ++}; ++ ++struct hiddev_usage_ref_multi { ++ struct hiddev_usage_ref uref; ++ __u32 num_values; ++ __s32 values[1024]; ++}; ++ ++struct hiddev_list { ++ struct hiddev_usage_ref buffer[2048]; ++ int head; ++ int tail; ++ unsigned int flags; ++ struct fasync_struct *fasync; ++ struct hiddev *hiddev; ++ struct list_head node; ++ struct mutex thread_lock; ++}; ++ ++struct pidff_usage { ++ struct hid_field *field; ++ s32 *value; ++}; ++ ++struct pidff_device { ++ struct hid_device *hid; ++ struct hid_report *reports[13]; ++ struct pidff_usage set_effect[7]; ++ struct pidff_usage set_envelope[5]; ++ struct pidff_usage set_condition[8]; ++ struct pidff_usage set_periodic[5]; ++ struct pidff_usage set_constant[2]; ++ struct pidff_usage set_ramp[3]; ++ struct pidff_usage device_gain[1]; ++ struct pidff_usage block_load[2]; ++ struct pidff_usage pool[3]; ++ struct pidff_usage effect_operation[2]; ++ struct pidff_usage block_free[1]; ++ struct hid_field *create_new_effect_type; ++ struct hid_field *set_effect_type; ++ struct hid_field *effect_direction; ++ struct hid_field *device_control; ++ struct hid_field *block_load_status; ++ struct hid_field *effect_operation_status; ++ int control_id[2]; ++ int type_id[11]; ++ int status_id[2]; ++ int operation_id[2]; ++ int pid_id[64]; ++}; ++ ++struct alias_prop { ++ struct list_head link; ++ const char *alias; ++ struct device_node *np; ++ int id; ++ char stem[0]; ++}; ++ ++struct of_dev_auxdata { ++ char *compatible; ++ resource_size_t phys_addr; ++ char *name; ++ void *platform_data; ++}; ++ ++struct of_changeset_entry { ++ struct list_head node; ++ long unsigned 
int action; ++ struct device_node *np; ++ struct property *prop; ++ struct property *old_prop; ++}; ++ ++struct of_changeset { ++ struct list_head entries; ++}; ++ ++struct of_bus { ++ void (*count_cells)(const void *, int, int *, int *); ++ u64 (*map)(__be32 *, const __be32 *, int, int, int); ++ int (*translate)(__be32 *, u64, int); ++}; ++ ++struct of_bus___2 { ++ const char *name; ++ const char *addresses; ++ int (*match)(struct device_node *); ++ void (*count_cells)(struct device_node *, int *, int *); ++ u64 (*map)(__be32 *, const __be32 *, int, int, int); ++ int (*translate)(__be32 *, u64, int); ++ unsigned int (*get_flags)(const __be32 *); ++}; ++ ++typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); ++ ++struct of_intc_desc { ++ struct list_head list; ++ of_irq_init_cb_t irq_init_cb; ++ struct device_node *dev; ++ struct device_node *interrupt_parent; ++}; ++ ++struct nvmem_cell; ++ ++struct rmem_assigned_device { ++ struct device *dev; ++ struct reserved_mem *rmem; ++ struct list_head list; ++}; ++ ++enum of_overlay_notify_action { ++ OF_OVERLAY_PRE_APPLY = 0, ++ OF_OVERLAY_POST_APPLY = 1, ++ OF_OVERLAY_PRE_REMOVE = 2, ++ OF_OVERLAY_POST_REMOVE = 3, ++}; ++ ++struct of_overlay_notify_data { ++ struct device_node *overlay; ++ struct device_node *target; ++}; ++ ++struct target { ++ struct device_node *np; ++ bool in_livetree; ++}; ++ ++struct fragment { ++ struct device_node *target; ++ struct device_node *overlay; ++}; ++ ++struct overlay_changeset { ++ int id; ++ struct list_head ovcs_list; ++ const void *fdt; ++ struct device_node *overlay_tree; ++ int count; ++ struct fragment *fragments; ++ bool symbols_fragment; ++ struct of_changeset cset; ++}; ++ ++struct acpi_table_pcct { ++ struct acpi_table_header header; ++ u32 flags; ++ u64 reserved; ++}; ++ ++enum acpi_pcct_type { ++ ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, ++ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, ++ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, ++ ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, ++ ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, ++ ACPI_PCCT_TYPE_RESERVED = 5, ++}; ++ ++struct acpi_pcct_subspace { ++ struct acpi_subtable_header header; ++ u8 reserved[6]; ++ u64 base_address; ++ u64 length; ++ struct acpi_generic_address doorbell_register; ++ u64 preserve_mask; ++ u64 write_mask; ++ u32 latency; ++ u32 max_access_rate; ++ u16 min_turnaround_time; ++} __attribute__((packed)); ++ ++struct acpi_pcct_hw_reduced_type2 { ++ struct acpi_subtable_header header; ++ u32 platform_interrupt; ++ u8 flags; ++ u8 reserved; ++ u64 base_address; ++ u64 length; ++ struct acpi_generic_address doorbell_register; ++ u64 preserve_mask; ++ u64 write_mask; ++ u32 latency; ++ u32 max_access_rate; ++ u16 min_turnaround_time; ++ struct acpi_generic_address platform_ack_register; ++ u64 ack_preserve_mask; ++ u64 ack_write_mask; ++} __attribute__((packed)); ++ ++struct hi3660_chan_info { ++ unsigned int dst_irq; ++ unsigned int ack_irq; ++}; ++ ++struct hi3660_mbox { ++ struct device *dev; ++ void *base; ++ struct mbox_chan chan[32]; ++ struct hi3660_chan_info mchan[32]; ++ struct mbox_controller controller; ++}; ++ ++struct hi6220_mbox; ++ ++struct hi6220_mbox_chan { ++ unsigned int dir; ++ unsigned int dst_irq; ++ unsigned int ack_irq; ++ unsigned int slot; ++ struct hi6220_mbox *parent; ++}; ++ ++struct hi6220_mbox { ++ struct device *dev; ++ int irq; ++ bool tx_irq_mode; ++ void *ipc; ++ void *base; ++ unsigned int chan_num; ++ struct hi6220_mbox_chan *mchan; ++ void *irq_map_chan[32]; ++ struct mbox_chan *chan; 
++ struct mbox_controller controller; ++}; ++ ++struct hwspinlock___2; ++ ++struct hwspinlock_ops { ++ int (*trylock)(struct hwspinlock___2 *); ++ void (*unlock)(struct hwspinlock___2 *); ++ void (*relax)(struct hwspinlock___2 *); ++}; ++ ++struct hwspinlock_device; ++ ++struct hwspinlock___2 { ++ struct hwspinlock_device *bank; ++ spinlock_t lock; ++ void *priv; ++}; ++ ++struct hwspinlock_device { ++ struct device *dev; ++ const struct hwspinlock_ops *ops; ++ int base_id; ++ int num_locks; ++ struct hwspinlock___2 lock[0]; ++}; ++ ++union extcon_property_value { ++ int intval; ++}; ++ ++struct extcon_cable; ++ ++struct extcon_dev___2 { ++ const char *name; ++ const unsigned int *supported_cable; ++ const u32 *mutually_exclusive; ++ struct device dev; ++ struct raw_notifier_head nh_all; ++ struct raw_notifier_head *nh; ++ struct list_head entry; ++ int max_supported; ++ spinlock_t lock; ++ u32 state; ++ struct device_type extcon_dev_type; ++ struct extcon_cable *cables; ++ struct attribute_group attr_g_muex; ++ struct attribute **attrs_muex; ++ struct device_attribute *d_attrs_muex; ++}; ++ ++struct extcon_cable { ++ struct extcon_dev___2 *edev; ++ int cable_index; ++ struct attribute_group attr_g; ++ struct device_attribute attr_name; ++ struct device_attribute attr_state; ++ struct attribute *attrs[3]; ++ union extcon_property_value usb_propval[3]; ++ union extcon_property_value chg_propval[1]; ++ union extcon_property_value jack_propval[1]; ++ union extcon_property_value disp_propval[2]; ++ long unsigned int usb_bits[1]; ++ long unsigned int chg_bits[1]; ++ long unsigned int jack_bits[1]; ++ long unsigned int disp_bits[1]; ++}; ++ ++struct __extcon_info { ++ unsigned int type; ++ unsigned int id; ++ const char *name; ++}; ++ ++struct extcon_dev_notifier_devres { ++ struct extcon_dev___2 *edev; ++ unsigned int id; ++ struct notifier_block *nb; ++}; ++ ++struct arm_ccn_component { ++ void *base; ++ u32 type; ++ long unsigned int pmu_events_mask[1]; ++ union { ++ struct { ++ long unsigned int dt_cmp_mask[1]; ++ } xp; ++ }; ++}; ++ ++struct arm_ccn_dt { ++ int id; ++ void *base; ++ spinlock_t config_lock; ++ long unsigned int pmu_counters_mask[1]; ++ struct { ++ struct arm_ccn_component *source; ++ struct perf_event *event; ++ } pmu_counters[9]; ++ struct { ++ u64 l; ++ u64 h; ++ } cmp_mask[12]; ++ struct hrtimer hrtimer; ++ cpumask_t cpu; ++ struct hlist_node node; ++ struct pmu pmu; ++}; ++ ++struct arm_ccn { ++ struct device *dev; ++ void *base; ++ unsigned int irq; ++ unsigned int sbas_present: 1; ++ unsigned int sbsx_present: 1; ++ int num_nodes; ++ struct arm_ccn_component *node; ++ int num_xps; ++ struct arm_ccn_component *xp; ++ struct arm_ccn_dt dt; ++ int mn_id; ++}; ++ ++struct arm_ccn_pmu_event { ++ struct device_attribute attr; ++ u32 type; ++ u32 event; ++ int num_ports; ++ int num_vcs; ++ const char *def; ++ int mask; ++}; ++ ++struct pmu_irq_ops { ++ void (*enable_pmuirq)(unsigned int); ++ void (*disable_pmuirq)(unsigned int); ++ void (*free_pmuirq)(unsigned int, int, void *); ++}; ++ ++typedef int (*armpmu_init_fn)(struct arm_pmu *); ++ ++struct pmu_probe_info { ++ unsigned int cpuid; ++ unsigned int mask; ++ armpmu_init_fn init; ++}; ++ ++struct smmu_pmu { ++ struct hlist_node node; ++ struct perf_event *events[64]; ++ long unsigned int used_counters[1]; ++ long unsigned int supported_events[2]; ++ unsigned int irq; ++ unsigned int on_cpu; ++ struct pmu pmu; ++ unsigned int num_counters; ++ struct device *dev; ++ void *reg_base; ++ void *reloc_base; ++ u32 options; ++ u64 
counter_mask; ++ bool global_filter; ++ u32 global_filter_span; ++ u32 global_filter_sid; ++}; ++ ++struct hisi_pmu; ++ ++struct hisi_uncore_ops { ++ void (*write_evtype)(struct hisi_pmu *, int, u32); ++ int (*get_event_idx)(struct perf_event *); ++ u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); ++ void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64); ++ void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *); ++ void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *); ++ void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); ++ void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); ++ void (*start_counters)(struct hisi_pmu *); ++ void (*stop_counters)(struct hisi_pmu *); ++}; ++ ++struct hisi_pmu_hwevents { ++ struct perf_event *hw_events[16]; ++ long unsigned int used_mask[1]; ++}; ++ ++struct hisi_pmu { ++ struct pmu pmu; ++ const struct hisi_uncore_ops *ops; ++ struct hisi_pmu_hwevents pmu_events; ++ cpumask_t associated_cpus; ++ int on_cpu; ++ int irq; ++ struct device *dev; ++ struct hlist_node node; ++ int sccl_id; ++ int ccl_id; ++ void *base; ++ u32 index_id; ++ int num_counters; ++ int counter_bits; ++ int check_event; ++}; ++ ++struct cluster_pmu; ++ ++struct l2cache_pmu { ++ struct hlist_node node; ++ u32 num_pmus; ++ struct pmu pmu; ++ int num_counters; ++ cpumask_t cpumask; ++ struct platform_device *pdev; ++ struct cluster_pmu **pmu_cluster; ++ struct list_head clusters; ++}; ++ ++struct cluster_pmu { ++ struct list_head next; ++ struct perf_event *events[9]; ++ struct l2cache_pmu *l2cache_pmu; ++ long unsigned int used_counters[1]; ++ long unsigned int used_groups[1]; ++ int irq; ++ int cluster_id; ++ int on_cpu; ++ cpumask_t cluster_cpus; ++ spinlock_t pmu_lock; ++}; ++ ++struct l3cache_pmu { ++ struct pmu pmu; ++ struct hlist_node node; ++ void *regs; ++ struct perf_event *events[8]; ++ long unsigned int used_mask[1]; ++ cpumask_t cpumask; ++}; ++ ++struct l3cache_event_ops { ++ void (*start)(struct perf_event *); ++ void (*stop)(struct perf_event *, int); ++ void (*update)(struct perf_event *); ++}; ++ ++struct hw_pmu_info { ++ u32 type; ++ u32 enable_mask; ++ void *csr; ++}; ++ ++struct xgene_pmu; ++ ++struct xgene_pmu_dev { ++ struct hw_pmu_info *inf; ++ struct xgene_pmu *parent; ++ struct pmu pmu; ++ u8 max_counters; ++ long unsigned int cntr_assign_mask[1]; ++ u64 max_period; ++ const struct attribute_group **attr_groups; ++ struct perf_event *pmu_counter_event[4]; ++}; ++ ++struct xgene_pmu_ops; ++ ++struct xgene_pmu { ++ struct device *dev; ++ int version; ++ void *pcppmu_csr; ++ u32 mcb_active_mask; ++ u32 mc_active_mask; ++ u32 l3c_active_mask; ++ cpumask_t cpu; ++ raw_spinlock_t lock; ++ const struct xgene_pmu_ops *ops; ++ struct list_head l3cpmus; ++ struct list_head iobpmus; ++ struct list_head mcbpmus; ++ struct list_head mcpmus; ++}; ++ ++struct xgene_pmu_ops { ++ void (*mask_int)(struct xgene_pmu *); ++ void (*unmask_int)(struct xgene_pmu *); ++ u64 (*read_counter)(struct xgene_pmu_dev *, int); ++ void (*write_counter)(struct xgene_pmu_dev *, int, u64); ++ void (*write_evttype)(struct xgene_pmu_dev *, int, u32); ++ void (*write_agentmsk)(struct xgene_pmu_dev *, u32); ++ void (*write_agent1msk)(struct xgene_pmu_dev *, u32); ++ void (*enable_counter)(struct xgene_pmu_dev *, int); ++ void (*disable_counter)(struct xgene_pmu_dev *, int); ++ void (*enable_counter_int)(struct xgene_pmu_dev *, int); ++ void (*disable_counter_int)(struct xgene_pmu_dev *, int); ++ void 
(*reset_counters)(struct xgene_pmu_dev *); ++ void (*start_counters)(struct xgene_pmu_dev *); ++ void (*stop_counters)(struct xgene_pmu_dev *); ++}; ++ ++struct xgene_pmu_dev_ctx { ++ char *name; ++ struct list_head next; ++ struct xgene_pmu_dev *pmu_dev; ++ struct hw_pmu_info inf; ++}; ++ ++struct xgene_pmu_data { ++ int id; ++ u32 data; ++}; ++ ++enum xgene_pmu_version { ++ PCP_PMU_V1 = 1, ++ PCP_PMU_V2 = 2, ++ PCP_PMU_V3 = 3, ++}; ++ ++enum xgene_pmu_dev_type { ++ PMU_TYPE_L3C = 0, ++ PMU_TYPE_IOB = 1, ++ PMU_TYPE_IOB_SLOW = 2, ++ PMU_TYPE_MCB = 3, ++ PMU_TYPE_MC = 4, ++}; ++ ++struct arm_spe_pmu_buf { ++ int nr_pages; ++ bool snapshot; ++ void *base; ++}; ++ ++struct arm_spe_pmu { ++ struct pmu pmu; ++ struct platform_device *pdev; ++ cpumask_t supported_cpus; ++ struct hlist_node hotplug_node; ++ int irq; ++ u16 min_period; ++ u16 counter_sz; ++ u64 features; ++ u16 max_record_sz; ++ u16 align; ++ struct perf_output_handle *handle; ++}; ++ ++enum arm_spe_pmu_buf_fault_action { ++ SPE_PMU_BUF_FAULT_ACT_SPURIOUS = 0, ++ SPE_PMU_BUF_FAULT_ACT_FATAL = 1, ++ SPE_PMU_BUF_FAULT_ACT_OK = 2, ++}; ++ ++enum arm_spe_pmu_capabilities { ++ SPE_PMU_CAP_ARCH_INST = 0, ++ SPE_PMU_CAP_ERND = 1, ++ SPE_PMU_CAP_FEAT_MAX = 2, ++ SPE_PMU_CAP_CNT_SZ = 2, ++ SPE_PMU_CAP_MIN_IVAL = 3, ++}; ++ ++typedef guid_t uuid_le; ++ ++struct trace_event_raw_mc_event { ++ struct trace_entry ent; ++ unsigned int error_type; ++ u32 __data_loc_msg; ++ u32 __data_loc_label; ++ u16 error_count; ++ u8 mc_index; ++ s8 top_layer; ++ s8 middle_layer; ++ s8 lower_layer; ++ long int address; ++ u8 grain_bits; ++ long int syndrome; ++ u32 __data_loc_driver_detail; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_arm_event { ++ struct trace_entry ent; ++ u64 mpidr; ++ u64 midr; ++ u32 running_state; ++ u32 psci_state; ++ u8 affinity; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_non_standard_event { ++ struct trace_entry ent; ++ char sec_type[16]; ++ char fru_id[16]; ++ u32 __data_loc_fru_text; ++ u8 sev; ++ u32 len; ++ u32 __data_loc_buf; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_aer_event { ++ struct trace_entry ent; ++ u32 __data_loc_dev_name; ++ u32 status; ++ u8 severity; ++ u8 tlp_header_valid; ++ u32 tlp_header[4]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_memory_failure_event { ++ struct trace_entry ent; ++ long unsigned int pfn; ++ int type; ++ int result; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_mc_event { ++ u32 msg; ++ u32 label; ++ u32 driver_detail; ++}; ++ ++struct trace_event_data_offsets_arm_event {}; ++ ++struct trace_event_data_offsets_non_standard_event { ++ u32 fru_text; ++ u32 buf; ++}; ++ ++struct trace_event_data_offsets_aer_event { ++ u32 dev_name; ++}; ++ ++struct trace_event_data_offsets_memory_failure_event {}; ++ ++struct nvmem_device___2 { ++ const char *name; ++ struct module *owner; ++ struct device dev; ++ int stride; ++ int word_size; ++ int id; ++ int users; ++ size_t size; ++ bool read_only; ++ int flags; ++ struct bin_attribute eeprom; ++ struct device *base_dev; ++ nvmem_reg_read_t reg_read; ++ nvmem_reg_write_t reg_write; ++ void *priv; ++}; ++ ++struct nvmem_cell___2 { ++ const char *name; ++ int offset; ++ int bytes; ++ int bit_offset; ++ int nbits; ++ struct nvmem_device___2 *nvmem; ++ struct list_head node; ++}; ++ ++struct user_msghdr { ++ void *msg_name; ++ int msg_namelen; ++ struct iovec *msg_iov; ++ __kernel_size_t msg_iovlen; ++ void *msg_control; ++ __kernel_size_t msg_controllen; ++ unsigned int msg_flags; ++}; ++ ++struct mmsghdr { ++ 
struct user_msghdr msg_hdr; ++ unsigned int msg_len; ++}; ++ ++enum sock_shutdown_cmd { ++ SHUT_RD = 0, ++ SHUT_WR = 1, ++ SHUT_RDWR = 2, ++}; ++ ++struct ifconf { ++ int ifc_len; ++ union { ++ char *ifcu_buf; ++ struct ifreq *ifcu_req; ++ } ifc_ifcu; ++}; ++ ++struct compat_ifmap { ++ compat_ulong_t mem_start; ++ compat_ulong_t mem_end; ++ short unsigned int base_addr; ++ unsigned char irq; ++ unsigned char dma; ++ unsigned char port; ++}; ++ ++struct compat_if_settings { ++ unsigned int type; ++ unsigned int size; ++ compat_uptr_t ifs_ifsu; ++}; ++ ++struct compat_ifreq { ++ union { ++ char ifrn_name[16]; ++ } ifr_ifrn; ++ union { ++ struct sockaddr ifru_addr; ++ struct sockaddr ifru_dstaddr; ++ struct sockaddr ifru_broadaddr; ++ struct sockaddr ifru_netmask; ++ struct sockaddr ifru_hwaddr; ++ short int ifru_flags; ++ compat_int_t ifru_ivalue; ++ compat_int_t ifru_mtu; ++ struct compat_ifmap ifru_map; ++ char ifru_slave[16]; ++ char ifru_newname[16]; ++ compat_caddr_t ifru_data; ++ struct compat_if_settings ifru_settings; ++ } ifr_ifru; ++}; ++ ++struct compat_ifconf { ++ compat_int_t ifc_len; ++ compat_caddr_t ifcbuf; ++}; ++ ++struct compat_ethtool_rx_flow_spec { ++ u32 flow_type; ++ union ethtool_flow_union h_u; ++ struct ethtool_flow_ext h_ext; ++ union ethtool_flow_union m_u; ++ struct ethtool_flow_ext m_ext; ++ compat_u64 ring_cookie; ++ u32 location; ++}; ++ ++struct compat_ethtool_rxnfc { ++ u32 cmd; ++ u32 flow_type; ++ compat_u64 data; ++ struct compat_ethtool_rx_flow_spec fs; ++ u32 rule_cnt; ++ u32 rule_locs[0]; ++}; ++ ++struct compat_msghdr { ++ compat_uptr_t msg_name; ++ compat_int_t msg_namelen; ++ compat_uptr_t msg_iov; ++ compat_size_t msg_iovlen; ++ compat_uptr_t msg_control; ++ compat_size_t msg_controllen; ++ compat_uint_t msg_flags; ++}; ++ ++struct compat_mmsghdr { ++ struct compat_msghdr msg_hdr; ++ compat_uint_t msg_len; ++}; ++ ++struct scm_ts_pktinfo { ++ __u32 if_index; ++ __u32 pkt_length; ++ __u32 reserved[2]; ++}; ++ ++struct sock_skb_cb { ++ u32 dropcount; ++}; ++ ++struct in6_rtmsg { ++ struct in6_addr rtmsg_dst; ++ struct in6_addr rtmsg_src; ++ struct in6_addr rtmsg_gateway; ++ __u32 rtmsg_type; ++ __u16 rtmsg_dst_len; ++ __u16 rtmsg_src_len; ++ __u32 rtmsg_metric; ++ long unsigned int rtmsg_info; ++ __u32 rtmsg_flags; ++ int rtmsg_ifindex; ++}; ++ ++struct rtentry { ++ long unsigned int rt_pad1; ++ struct sockaddr rt_dst; ++ struct sockaddr rt_gateway; ++ struct sockaddr rt_genmask; ++ short unsigned int rt_flags; ++ short int rt_pad2; ++ long unsigned int rt_pad3; ++ void *rt_pad4; ++ short int rt_metric; ++ char *rt_dev; ++ long unsigned int rt_mtu; ++ long unsigned int rt_window; ++ short unsigned int rt_irtt; ++}; ++ ++struct sock_extended_err { ++ __u32 ee_errno; ++ __u8 ee_origin; ++ __u8 ee_type; ++ __u8 ee_code; ++ __u8 ee_pad; ++ __u32 ee_info; ++ __u32 ee_data; ++}; ++ ++struct scm_timestamping { ++ struct timespec ts[3]; ++}; ++ ++struct sock_exterr_skb { ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ } header; ++ struct sock_extended_err ee; ++ u16 addr_offset; ++ __be16 port; ++ u8 opt_stats: 1; ++ u8 unused: 7; ++}; ++ ++struct used_address { ++ struct __kernel_sockaddr_storage name; ++ unsigned int name_len; ++ int: 32; ++}; ++ ++struct rtentry32 { ++ u32 rt_pad1; ++ struct sockaddr rt_dst; ++ struct sockaddr rt_gateway; ++ struct sockaddr rt_genmask; ++ short unsigned int rt_flags; ++ short int rt_pad2; ++ u32 rt_pad3; ++ unsigned char rt_tos; ++ unsigned char rt_class; ++ short int rt_pad4; ++ short int 
rt_metric; ++ u32 rt_dev; ++ u32 rt_mtu; ++ u32 rt_window; ++ short unsigned int rt_irtt; ++}; ++ ++struct in6_rtmsg32 { ++ struct in6_addr rtmsg_dst; ++ struct in6_addr rtmsg_src; ++ struct in6_addr rtmsg_gateway; ++ u32 rtmsg_type; ++ u16 rtmsg_dst_len; ++ u16 rtmsg_src_len; ++ u32 rtmsg_metric; ++ u32 rtmsg_info; ++ u32 rtmsg_flags; ++ s32 rtmsg_ifindex; ++}; ++ ++struct linger { ++ int l_onoff; ++ int l_linger; ++}; ++ ++struct ucred { ++ __u32 pid; ++ __u32 uid; ++ __u32 gid; ++}; ++ ++struct prot_inuse { ++ int val[64]; ++}; ++ ++enum txtime_flags { ++ SOF_TXTIME_DEADLINE_MODE = 1, ++ SOF_TXTIME_REPORT_ERRORS = 2, ++ SOF_TXTIME_FLAGS_LAST = 2, ++ SOF_TXTIME_FLAGS_MASK = 3, ++}; ++ ++struct sock_txtime { ++ __kernel_clockid_t clockid; ++ __u32 flags; ++}; ++ ++enum sk_pacing { ++ SK_PACING_NONE = 0, ++ SK_PACING_NEEDED = 1, ++ SK_PACING_FQ = 2, ++}; ++ ++struct sockcm_cookie { ++ u64 transmit_time; ++ u32 mark; ++ u16 tsflags; ++}; ++ ++struct fastopen_queue { ++ struct request_sock *rskq_rst_head; ++ struct request_sock *rskq_rst_tail; ++ spinlock_t lock; ++ int qlen; ++ int max_qlen; ++ struct tcp_fastopen_context *ctx; ++}; ++ ++struct request_sock_queue { ++ spinlock_t rskq_lock; ++ u8 rskq_defer_accept; ++ u32 synflood_warned; ++ atomic_t qlen; ++ atomic_t young; ++ struct request_sock *rskq_accept_head; ++ struct request_sock *rskq_accept_tail; ++ struct fastopen_queue fastopenq; ++}; ++ ++struct minmax_sample { ++ u32 t; ++ u32 v; ++}; ++ ++struct minmax { ++ struct minmax_sample s[3]; ++}; ++ ++struct inet_connection_sock_af_ops { ++ int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); ++ void (*send_check)(struct sock *, struct sk_buff *); ++ int (*rebuild_header)(struct sock *); ++ void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); ++ int (*conn_request)(struct sock *, struct sk_buff *); ++ struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); ++ u16 net_header_len; ++ u16 net_frag_header_len; ++ u16 sockaddr_len; ++ int (*setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*getsockopt)(struct sock *, int, int, char *, int *); ++ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); ++ int (*compat_getsockopt)(struct sock *, int, int, char *, int *); ++ void (*addr2sockaddr)(struct sock *, struct sockaddr *); ++ void (*mtu_reduced)(struct sock *); ++}; ++ ++struct inet_bind_bucket; ++ ++struct tcp_ulp_ops; ++ ++struct inet_connection_sock { ++ struct inet_sock icsk_inet; ++ struct request_sock_queue icsk_accept_queue; ++ struct inet_bind_bucket *icsk_bind_hash; ++ long unsigned int icsk_timeout; ++ struct timer_list icsk_retransmit_timer; ++ struct timer_list icsk_delack_timer; ++ __u32 icsk_rto; ++ __u32 icsk_pmtu_cookie; ++ const struct tcp_congestion_ops *icsk_ca_ops; ++ const struct inet_connection_sock_af_ops *icsk_af_ops; ++ const struct tcp_ulp_ops *icsk_ulp_ops; ++ void *icsk_ulp_data; ++ void (*icsk_clean_acked)(struct sock *, u32); ++ struct hlist_node icsk_listen_portaddr_node; ++ unsigned int (*icsk_sync_mss)(struct sock *, u32); ++ __u8 icsk_ca_state: 6; ++ __u8 icsk_ca_setsockopt: 1; ++ __u8 icsk_ca_dst_locked: 1; ++ __u8 icsk_retransmits; ++ __u8 icsk_pending; ++ __u8 icsk_backoff; ++ __u8 icsk_syn_retries; ++ __u8 icsk_probes_out; ++ __u16 icsk_ext_hdr_len; ++ struct { ++ __u8 pending; ++ __u8 quick; ++ __u8 pingpong; ++ __u8 blocked; ++ __u32 ato; ++ long unsigned int timeout; ++ __u32 lrcvtime; ++ __u16 
last_seg_size; ++ __u16 rcv_mss; ++ } icsk_ack; ++ struct { ++ int enabled; ++ int search_high; ++ int search_low; ++ int probe_size; ++ u32 probe_timestamp; ++ } icsk_mtup; ++ u32 icsk_user_timeout; ++ u64 icsk_ca_priv[13]; ++}; ++ ++struct inet_bind_bucket { ++ possible_net_t ib_net; ++ int l3mdev; ++ short unsigned int port; ++ signed char fastreuse; ++ signed char fastreuseport; ++ kuid_t fastuid; ++ struct in6_addr fast_v6_rcv_saddr; ++ __be32 fast_rcv_saddr; ++ short unsigned int fast_sk_family; ++ bool fast_ipv6_only; ++ struct hlist_node node; ++ struct hlist_head owners; ++}; ++ ++struct tcp_ulp_ops { ++ struct list_head list; ++ int (*init)(struct sock *); ++ void (*release)(struct sock *); ++ int uid; ++ char name[16]; ++ bool user_visible; ++ struct module *owner; ++}; ++ ++struct tcp_fastopen_cookie { ++ union { ++ u8 val[16]; ++ struct in6_addr addr; ++ }; ++ s8 len; ++ bool exp; ++}; ++ ++struct tcp_sack_block { ++ u32 start_seq; ++ u32 end_seq; ++}; ++ ++struct tcp_options_received { ++ int ts_recent_stamp; ++ u32 ts_recent; ++ u32 rcv_tsval; ++ u32 rcv_tsecr; ++ u16 saw_tstamp: 1; ++ u16 tstamp_ok: 1; ++ u16 dsack: 1; ++ u16 wscale_ok: 1; ++ u16 sack_ok: 3; ++ u16 smc_ok: 1; ++ u16 snd_wscale: 4; ++ u16 rcv_wscale: 4; ++ u8 num_sacks; ++ u16 user_mss; ++ u16 mss_clamp; ++}; ++ ++struct tcp_rack { ++ u64 mstamp; ++ u32 rtt_us; ++ u32 end_seq; ++ u32 last_delivered; ++ u8 reo_wnd_steps; ++ u8 reo_wnd_persist: 5; ++ u8 dsack_seen: 1; ++ u8 advanced: 1; ++}; ++ ++struct tcp_sock_af_ops; ++ ++struct tcp_md5sig_info; ++ ++struct tcp_fastopen_request; ++ ++struct tcp_sock { ++ struct inet_connection_sock inet_conn; ++ u16 tcp_header_len; ++ u16 gso_segs; ++ __be32 pred_flags; ++ u64 bytes_received; ++ u32 segs_in; ++ u32 data_segs_in; ++ u32 rcv_nxt; ++ u32 copied_seq; ++ u32 rcv_wup; ++ u32 snd_nxt; ++ u32 segs_out; ++ u32 data_segs_out; ++ u64 bytes_sent; ++ u64 bytes_acked; ++ u32 dsack_dups; ++ u32 snd_una; ++ u32 snd_sml; ++ u32 rcv_tstamp; ++ u32 lsndtime; ++ u32 last_oow_ack_time; ++ u32 compressed_ack_rcv_nxt; ++ u32 tsoffset; ++ struct list_head tsq_node; ++ struct list_head tsorted_sent_queue; ++ u32 snd_wl1; ++ u32 snd_wnd; ++ u32 max_window; ++ u32 mss_cache; ++ u32 window_clamp; ++ u32 rcv_ssthresh; ++ struct tcp_rack rack; ++ u16 advmss; ++ u8 compressed_ack; ++ u8 tlp_retrans: 1; ++ u8 unused_1: 7; ++ u32 chrono_start; ++ u32 chrono_stat[3]; ++ u8 chrono_type: 2; ++ u8 rate_app_limited: 1; ++ u8 fastopen_connect: 1; ++ u8 fastopen_no_cookie: 1; ++ u8 is_sack_reneg: 1; ++ u8 unused: 2; ++ u8 nonagle: 4; ++ u8 thin_lto: 1; ++ u8 recvmsg_inq: 1; ++ u8 repair: 1; ++ u8 frto: 1; ++ u8 repair_queue; ++ u8 syn_data: 1; ++ u8 syn_fastopen: 1; ++ u8 syn_fastopen_exp: 1; ++ u8 syn_fastopen_ch: 1; ++ u8 syn_data_acked: 1; ++ u8 save_syn: 1; ++ u8 is_cwnd_limited: 1; ++ u8 syn_smc: 1; ++ u32 tlp_high_seq; ++ u64 tcp_mstamp; ++ u32 srtt_us; ++ u32 mdev_us; ++ u32 mdev_max_us; ++ u32 rttvar_us; ++ u32 rtt_seq; ++ struct minmax rtt_min; ++ u32 packets_out; ++ u32 retrans_out; ++ u32 max_packets_out; ++ u32 max_packets_seq; ++ u16 urg_data; ++ u8 ecn_flags; ++ u8 keepalive_probes; ++ u32 reordering; ++ u32 reord_seen; ++ u32 snd_up; ++ struct tcp_options_received rx_opt; ++ u32 snd_ssthresh; ++ u32 snd_cwnd; ++ u32 snd_cwnd_cnt; ++ u32 snd_cwnd_clamp; ++ u32 snd_cwnd_used; ++ u32 snd_cwnd_stamp; ++ u32 prior_cwnd; ++ u32 prr_delivered; ++ u32 prr_out; ++ u32 delivered; ++ u32 delivered_ce; ++ u32 lost; ++ u32 app_limited; ++ u64 first_tx_mstamp; ++ u64 delivered_mstamp; ++ u32 
rate_delivered; ++ u32 rate_interval_us; ++ u32 rcv_wnd; ++ u32 write_seq; ++ u32 notsent_lowat; ++ u32 pushed_seq; ++ u32 lost_out; ++ u32 sacked_out; ++ struct hrtimer pacing_timer; ++ struct hrtimer compressed_ack_timer; ++ struct sk_buff *lost_skb_hint; ++ struct sk_buff *retransmit_skb_hint; ++ struct rb_root out_of_order_queue; ++ struct sk_buff *ooo_last_skb; ++ struct tcp_sack_block duplicate_sack[1]; ++ struct tcp_sack_block selective_acks[4]; ++ struct tcp_sack_block recv_sack_cache[4]; ++ struct sk_buff *highest_sack; ++ int lost_cnt_hint; ++ u32 prior_ssthresh; ++ u32 high_seq; ++ u32 retrans_stamp; ++ u32 undo_marker; ++ int undo_retrans; ++ u64 bytes_retrans; ++ u32 total_retrans; ++ u32 urg_seq; ++ unsigned int keepalive_time; ++ unsigned int keepalive_intvl; ++ int linger2; ++ u8 bpf_sock_ops_cb_flags; ++ u32 rcv_rtt_last_tsecr; ++ struct { ++ u32 rtt_us; ++ u32 seq; ++ u64 time; ++ } rcv_rtt_est; ++ struct { ++ u32 space; ++ u32 seq; ++ u64 time; ++ } rcvq_space; ++ struct { ++ u32 probe_seq_start; ++ u32 probe_seq_end; ++ } mtu_probe; ++ u32 mtu_info; ++ const struct tcp_sock_af_ops *af_specific; ++ struct tcp_md5sig_info *md5sig_info; ++ struct tcp_fastopen_request *fastopen_req; ++ struct request_sock *fastopen_rsk; ++ u32 *saved_syn; ++}; ++ ++struct tcp_md5sig_key; ++ ++struct tcp_sock_af_ops { ++ struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *); ++ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); ++ int (*md5_parse)(struct sock *, int, char *, int); ++}; ++ ++struct tcp_md5sig_info { ++ struct hlist_head head; ++ struct callback_head rcu; ++}; ++ ++struct tcp_fastopen_request { ++ struct tcp_fastopen_cookie cookie; ++ struct msghdr *data; ++ size_t size; ++ int copied; ++}; ++ ++union tcp_md5_addr { ++ struct in_addr a4; ++ struct in6_addr a6; ++}; ++ ++struct tcp_md5sig_key { ++ struct hlist_node node; ++ u8 keylen; ++ u8 family; ++ union tcp_md5_addr addr; ++ u8 prefixlen; ++ u8 key[80]; ++ struct callback_head rcu; ++}; ++ ++struct cgroup_cls_state { ++ struct cgroup_subsys_state css; ++ u32 classid; ++}; ++ ++enum { ++ SK_MEMINFO_RMEM_ALLOC = 0, ++ SK_MEMINFO_RCVBUF = 1, ++ SK_MEMINFO_WMEM_ALLOC = 2, ++ SK_MEMINFO_SNDBUF = 3, ++ SK_MEMINFO_FWD_ALLOC = 4, ++ SK_MEMINFO_WMEM_QUEUED = 5, ++ SK_MEMINFO_OPTMEM = 6, ++ SK_MEMINFO_BACKLOG = 7, ++ SK_MEMINFO_DROPS = 8, ++ SK_MEMINFO_VARS = 9, ++}; ++ ++enum sknetlink_groups { ++ SKNLGRP_NONE = 0, ++ SKNLGRP_INET_TCP_DESTROY = 1, ++ SKNLGRP_INET_UDP_DESTROY = 2, ++ SKNLGRP_INET6_TCP_DESTROY = 3, ++ SKNLGRP_INET6_UDP_DESTROY = 4, ++ __SKNLGRP_MAX = 5, ++}; ++ ++struct inet_request_sock { ++ struct request_sock req; ++ u16 snd_wscale: 4; ++ u16 rcv_wscale: 4; ++ u16 tstamp_ok: 1; ++ u16 sack_ok: 1; ++ u16 wscale_ok: 1; ++ u16 ecn_ok: 1; ++ u16 acked: 1; ++ u16 no_srccheck: 1; ++ u16 smc_ok: 1; ++ u32 ir_mark; ++ union { ++ struct ip_options_rcu *ireq_opt; ++ struct { ++ struct ipv6_txoptions *ipv6_opt; ++ struct sk_buff *pktopts; ++ }; ++ }; ++}; ++ ++struct tcp_request_sock_ops; ++ ++struct tcp_request_sock { ++ struct inet_request_sock req; ++ const struct tcp_request_sock_ops *af_specific; ++ u64 snt_synack; ++ bool tfo_listener; ++ u32 txhash; ++ u32 rcv_isn; ++ u32 snt_isn; ++ u32 ts_off; ++ u32 last_oow_ack_time; ++ u32 rcv_nxt; ++}; ++ ++enum tcp_synack_type { ++ TCP_SYNACK_NORMAL = 0, ++ TCP_SYNACK_FASTOPEN = 1, ++ TCP_SYNACK_COOKIE = 2, ++}; ++ ++struct tcp_request_sock_ops { ++ u16 mss_clamp; ++ struct tcp_md5sig_key * 
(*req_md5_lookup)(const struct sock *, const struct sock *); ++ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); ++ void (*init_req)(struct request_sock *, const struct sock *, struct sk_buff *); ++ __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); ++ struct dst_entry * (*route_req)(const struct sock *, struct flowi *, const struct request_sock *); ++ u32 (*init_seq)(const struct sk_buff *); ++ u32 (*init_ts_off)(const struct net *, const struct sk_buff *); ++ int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type); ++}; ++ ++struct mmpin { ++ struct user_struct *user; ++ unsigned int num_pg; ++}; ++ ++struct ubuf_info { ++ void (*callback)(struct ubuf_info *, bool); ++ union { ++ struct { ++ long unsigned int desc; ++ void *ctx; ++ }; ++ struct { ++ u32 id; ++ u16 len; ++ u16 zerocopy: 1; ++ u32 bytelen; ++ }; ++ }; ++ refcount_t refcnt; ++ struct mmpin mmp; ++}; ++ ++enum { ++ SKB_FCLONE_UNAVAILABLE = 0, ++ SKB_FCLONE_ORIG = 1, ++ SKB_FCLONE_CLONE = 2, ++}; ++ ++struct sk_buff_fclones { ++ struct sk_buff skb1; ++ struct sk_buff skb2; ++ refcount_t fclone_ref; ++}; ++ ++struct skb_seq_state { ++ __u32 lower_offset; ++ __u32 upper_offset; ++ __u32 frag_idx; ++ __u32 stepped_offset; ++ struct sk_buff *root_skb; ++ struct sk_buff *cur_skb; ++ __u8 *frag_data; ++}; ++ ++struct skb_gso_cb { ++ union { ++ int mac_offset; ++ int data_offset; ++ }; ++ int encap_level; ++ __wsum csum; ++ __u16 csum_start; ++}; ++ ++struct napi_gro_cb { ++ void *frag0; ++ unsigned int frag0_len; ++ int data_offset; ++ u16 flush; ++ u16 flush_id; ++ u16 count; ++ u16 gro_remcsum_start; ++ long unsigned int age; ++ u16 proto; ++ u8 same_flow: 1; ++ u8 encap_mark: 1; ++ u8 csum_valid: 1; ++ u8 csum_cnt: 3; ++ u8 free: 2; ++ u8 is_ipv6: 1; ++ u8 is_fou: 1; ++ u8 is_atomic: 1; ++ u8 recursion_counter: 4; ++ __wsum csum; ++ struct sk_buff *last; ++}; ++ ++struct vlan_hdr { ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++ ++struct vlan_ethhdr { ++ unsigned char h_dest[6]; ++ unsigned char h_source[6]; ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++ ++struct qdisc_walker { ++ int stop; ++ int skip; ++ int count; ++ int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); ++}; ++ ++struct ip_auth_hdr { ++ __u8 nexthdr; ++ __u8 hdrlen; ++ __be16 reserved; ++ __be32 spi; ++ __be32 seq_no; ++ __u8 auth_data[0]; ++}; ++ ++struct frag_hdr { ++ __u8 nexthdr; ++ __u8 reserved; ++ __be16 frag_off; ++ __be32 identification; ++}; ++ ++enum { ++ SCM_TSTAMP_SND = 0, ++ SCM_TSTAMP_SCHED = 1, ++ SCM_TSTAMP_ACK = 2, ++}; ++ ++struct napi_alloc_cache { ++ struct page_frag_cache page; ++ unsigned int skb_count; ++ void *skb_cache[64]; ++}; ++ ++struct scm_fp_list { ++ short int count; ++ short int max; ++ struct user_struct *user; ++ struct file *fp[253]; ++}; ++ ++struct scm_cookie { ++ struct pid *pid; ++ struct scm_fp_list *fp; ++ struct scm_creds creds; ++ u32 secid; ++}; ++ ++enum { ++ TCA_STATS_UNSPEC = 0, ++ TCA_STATS_BASIC = 1, ++ TCA_STATS_RATE_EST = 2, ++ TCA_STATS_QUEUE = 3, ++ TCA_STATS_APP = 4, ++ TCA_STATS_RATE_EST64 = 5, ++ TCA_STATS_PAD = 6, ++ __TCA_STATS_MAX = 7, ++}; ++ ++struct gnet_stats_basic { ++ __u64 bytes; ++ __u32 packets; ++}; ++ ++struct gnet_stats_rate_est { ++ __u32 bps; ++ __u32 pps; ++}; ++ ++struct gnet_stats_rate_est64 { ++ __u64 bps; ++ __u64 pps; ++}; ++ ++struct 
gnet_estimator { ++ signed char interval; ++ unsigned char ewma_log; ++}; ++ ++struct net_rate_estimator { ++ struct gnet_stats_basic_packed *bstats; ++ spinlock_t *stats_lock; ++ seqcount_t *running; ++ struct gnet_stats_basic_cpu *cpu_bstats; ++ u8 ewma_log; ++ u8 intvl_log; ++ seqcount_t seq; ++ u32 last_packets; ++ u64 last_bytes; ++ u64 avpps; ++ u64 avbps; ++ long unsigned int next_jiffies; ++ struct timer_list timer; ++ struct callback_head rcu; ++}; ++ ++struct rtgenmsg { ++ unsigned char rtgen_family; ++}; ++ ++enum rtnetlink_groups { ++ RTNLGRP_NONE = 0, ++ RTNLGRP_LINK = 1, ++ RTNLGRP_NOTIFY = 2, ++ RTNLGRP_NEIGH = 3, ++ RTNLGRP_TC = 4, ++ RTNLGRP_IPV4_IFADDR = 5, ++ RTNLGRP_IPV4_MROUTE = 6, ++ RTNLGRP_IPV4_ROUTE = 7, ++ RTNLGRP_IPV4_RULE = 8, ++ RTNLGRP_IPV6_IFADDR = 9, ++ RTNLGRP_IPV6_MROUTE = 10, ++ RTNLGRP_IPV6_ROUTE = 11, ++ RTNLGRP_IPV6_IFINFO = 12, ++ RTNLGRP_DECnet_IFADDR = 13, ++ RTNLGRP_NOP2 = 14, ++ RTNLGRP_DECnet_ROUTE = 15, ++ RTNLGRP_DECnet_RULE = 16, ++ RTNLGRP_NOP4 = 17, ++ RTNLGRP_IPV6_PREFIX = 18, ++ RTNLGRP_IPV6_RULE = 19, ++ RTNLGRP_ND_USEROPT = 20, ++ RTNLGRP_PHONET_IFADDR = 21, ++ RTNLGRP_PHONET_ROUTE = 22, ++ RTNLGRP_DCB = 23, ++ RTNLGRP_IPV4_NETCONF = 24, ++ RTNLGRP_IPV6_NETCONF = 25, ++ RTNLGRP_MDB = 26, ++ RTNLGRP_MPLS_ROUTE = 27, ++ RTNLGRP_NSID = 28, ++ RTNLGRP_MPLS_NETCONF = 29, ++ RTNLGRP_IPV4_MROUTE_R = 30, ++ RTNLGRP_IPV6_MROUTE_R = 31, ++ __RTNLGRP_MAX = 32, ++}; ++ ++enum { ++ NETNSA_NONE = 0, ++ NETNSA_NSID = 1, ++ NETNSA_PID = 2, ++ NETNSA_FD = 3, ++ __NETNSA_MAX = 4, ++}; ++ ++enum rtnl_link_flags { ++ RTNL_FLAG_DOIT_UNLOCKED = 1, ++}; ++ ++struct rtnl_net_dump_cb { ++ struct net *net; ++ struct sk_buff *skb; ++ struct netlink_callback *cb; ++ int idx; ++ int s_idx; ++}; ++ ++struct flow_dissector_key_control { ++ u16 thoff; ++ u16 addr_type; ++ u32 flags; ++}; ++ ++enum flow_dissect_ret { ++ FLOW_DISSECT_RET_OUT_GOOD = 0, ++ FLOW_DISSECT_RET_OUT_BAD = 1, ++ FLOW_DISSECT_RET_PROTO_AGAIN = 2, ++ FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, ++ FLOW_DISSECT_RET_CONTINUE = 4, ++}; ++ ++struct flow_dissector_key_basic { ++ __be16 n_proto; ++ u8 ip_proto; ++ u8 padding; ++}; ++ ++struct flow_dissector_key_tags { ++ u32 flow_label; ++}; ++ ++struct flow_dissector_key_vlan { ++ u16 vlan_id: 12; ++ u16 vlan_priority: 3; ++ __be16 vlan_tpid; ++}; ++ ++struct flow_dissector_key_mpls { ++ u32 mpls_ttl: 8; ++ u32 mpls_bos: 1; ++ u32 mpls_tc: 3; ++ u32 mpls_label: 20; ++}; ++ ++struct flow_dissector_key_enc_opts { ++ u8 data[255]; ++ u8 len; ++ __be16 dst_opt_type; ++}; ++ ++struct flow_dissector_key_keyid { ++ __be32 keyid; ++}; ++ ++struct flow_dissector_key_ipv4_addrs { ++ __be32 src; ++ __be32 dst; ++}; ++ ++struct flow_dissector_key_ipv6_addrs { ++ struct in6_addr src; ++ struct in6_addr dst; ++}; ++ ++struct flow_dissector_key_tipc { ++ __be32 key; ++}; ++ ++struct flow_dissector_key_addrs { ++ union { ++ struct flow_dissector_key_ipv4_addrs v4addrs; ++ struct flow_dissector_key_ipv6_addrs v6addrs; ++ struct flow_dissector_key_tipc tipckey; ++ }; ++}; ++ ++struct flow_dissector_key_arp { ++ __u32 sip; ++ __u32 tip; ++ __u8 op; ++ unsigned char sha[6]; ++ unsigned char tha[6]; ++}; ++ ++struct flow_dissector_key_ports { ++ union { ++ __be32 ports; ++ struct { ++ __be16 src; ++ __be16 dst; ++ }; ++ }; ++}; ++ ++struct flow_dissector_key_icmp { ++ union { ++ __be16 icmp; ++ struct { ++ u8 type; ++ u8 code; ++ }; ++ }; ++}; ++ ++struct flow_dissector_key_eth_addrs { ++ unsigned char dst[6]; ++ unsigned char src[6]; ++}; ++ ++struct flow_dissector_key_tcp { ++ 
__be16 flags; ++}; ++ ++struct flow_dissector_key_ip { ++ __u8 tos; ++ __u8 ttl; ++}; ++ ++struct flow_dissector_key { ++ enum flow_dissector_key_id key_id; ++ size_t offset; ++}; ++ ++struct flow_keys_basic { ++ struct flow_dissector_key_control control; ++ struct flow_dissector_key_basic basic; ++}; ++ ++struct flow_keys { ++ struct flow_dissector_key_control control; ++ struct flow_dissector_key_basic basic; ++ struct flow_dissector_key_tags tags; ++ struct flow_dissector_key_vlan vlan; ++ struct flow_dissector_key_vlan cvlan; ++ struct flow_dissector_key_keyid keyid; ++ struct flow_dissector_key_ports ports; ++ struct flow_dissector_key_addrs addrs; ++}; ++ ++struct flow_keys_digest { ++ u8 data[16]; ++}; ++ ++union tcp_word_hdr { ++ struct tcphdr hdr; ++ __be32 words[5]; ++}; ++ ++enum devlink_dpipe_field_mapping_type { ++ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0, ++ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1, ++}; ++ ++struct devlink_dpipe_field { ++ const char *name; ++ unsigned int id; ++ unsigned int bitwidth; ++ enum devlink_dpipe_field_mapping_type mapping_type; ++}; ++ ++struct devlink_dpipe_header { ++ const char *name; ++ unsigned int id; ++ struct devlink_dpipe_field *fields; ++ unsigned int fields_count; ++ bool global; ++}; ++ ++struct arphdr { ++ __be16 ar_hrd; ++ __be16 ar_pro; ++ unsigned char ar_hln; ++ unsigned char ar_pln; ++ __be16 ar_op; ++}; ++ ++struct switchdev_trans { ++ struct list_head item_list; ++ bool ph_prepare; ++}; ++ ++enum switchdev_attr_id { ++ SWITCHDEV_ATTR_ID_UNDEFINED = 0, ++ SWITCHDEV_ATTR_ID_PORT_PARENT_ID = 1, ++ SWITCHDEV_ATTR_ID_PORT_STP_STATE = 2, ++ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, ++ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT = 4, ++ SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, ++ SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, ++ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, ++ SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 8, ++ SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 9, ++}; ++ ++struct switchdev_attr { ++ struct net_device *orig_dev; ++ enum switchdev_attr_id id; ++ u32 flags; ++ void *complete_priv; ++ void (*complete)(struct net_device *, int, void *); ++ union { ++ struct netdev_phys_item_id ppid; ++ u8 stp_state; ++ long unsigned int brport_flags; ++ long unsigned int brport_flags_support; ++ bool mrouter; ++ clock_t ageing_time; ++ bool vlan_filtering; ++ bool mc_disabled; ++ } u; ++}; ++ ++enum switchdev_obj_id { ++ SWITCHDEV_OBJ_ID_UNDEFINED = 0, ++ SWITCHDEV_OBJ_ID_PORT_VLAN = 1, ++ SWITCHDEV_OBJ_ID_PORT_MDB = 2, ++ SWITCHDEV_OBJ_ID_HOST_MDB = 3, ++}; ++ ++struct switchdev_obj { ++ struct net_device *orig_dev; ++ enum switchdev_obj_id id; ++ u32 flags; ++ void *complete_priv; ++ void (*complete)(struct net_device *, int, void *); ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct dst_cache_pcpu; ++ ++struct dst_cache { ++ struct dst_cache_pcpu *cache; ++ long unsigned int reset_ts; ++}; ++ ++struct ip_tunnel_key { ++ __be64 tun_id; ++ union { ++ struct { ++ __be32 src; ++ __be32 dst; ++ } ipv4; ++ struct { ++ struct in6_addr src; ++ struct in6_addr dst; ++ } ipv6; ++ } u; ++ __be16 tun_flags; ++ u8 tos; ++ u8 ttl; ++ __be32 label; ++ __be16 tp_src; ++ __be16 tp_dst; ++}; ++ ++struct ip_tunnel_info { ++ struct ip_tunnel_key key; ++ struct dst_cache dst_cache; ++ u8 options_len; ++ u8 mode; ++}; ++ ++enum metadata_type { ++ METADATA_IP_TUNNEL = 0, ++ METADATA_HW_PORT_MUX = 1, ++}; ++ ++struct hw_port_info { ++ struct net_device 
*lower_dev; ++ u32 port_id; ++}; ++ ++struct metadata_dst { ++ struct dst_entry dst; ++ enum metadata_type type; ++ union { ++ struct ip_tunnel_info tun_info; ++ struct hw_port_info port_info; ++ } u; ++}; ++ ++struct gre_base_hdr { ++ __be16 flags; ++ __be16 protocol; ++}; ++ ++struct gre_full_hdr { ++ struct gre_base_hdr fixed_header; ++ __be16 csum; ++ __be16 reserved1; ++ __be32 key; ++ __be32 seq; ++}; ++ ++struct pptp_gre_header { ++ struct gre_base_hdr gre_hd; ++ __be16 payload_len; ++ __be16 call_id; ++ __be32 seq; ++ __be32 ack; ++}; ++ ++struct tipc_basic_hdr { ++ __be32 w[4]; ++}; ++ ++enum l2tp_debug_flags { ++ L2TP_MSG_DEBUG = 1, ++ L2TP_MSG_CONTROL = 2, ++ L2TP_MSG_SEQ = 4, ++ L2TP_MSG_DATA = 8, ++}; ++ ++struct pppoe_tag { ++ __be16 tag_type; ++ __be16 tag_len; ++ char tag_data[0]; ++}; ++ ++struct pppoe_hdr { ++ __u8 type: 4; ++ __u8 ver: 4; ++ __u8 code; ++ __be16 sid; ++ __be16 length; ++ struct pppoe_tag tag[0]; ++}; ++ ++struct mpls_label { ++ __be32 entry; ++}; ++ ++enum batadv_packettype { ++ BATADV_IV_OGM = 0, ++ BATADV_BCAST = 1, ++ BATADV_CODED = 2, ++ BATADV_ELP = 3, ++ BATADV_OGM2 = 4, ++ BATADV_UNICAST = 64, ++ BATADV_UNICAST_FRAG = 65, ++ BATADV_UNICAST_4ADDR = 66, ++ BATADV_ICMP = 67, ++ BATADV_UNICAST_TVLV = 68, ++}; ++ ++struct batadv_unicast_packet { ++ __u8 packet_type; ++ __u8 version; ++ __u8 ttl; ++ __u8 ttvn; ++ __u8 dest[6]; ++}; ++ ++struct _flow_keys_digest_data { ++ __be16 n_proto; ++ u8 ip_proto; ++ u8 padding; ++ __be32 ports; ++ __be32 src; ++ __be32 dst; ++}; ++ ++enum { ++ IF_OPER_UNKNOWN = 0, ++ IF_OPER_NOTPRESENT = 1, ++ IF_OPER_DOWN = 2, ++ IF_OPER_LOWERLAYERDOWN = 3, ++ IF_OPER_TESTING = 4, ++ IF_OPER_DORMANT = 5, ++ IF_OPER_UP = 6, ++}; ++ ++enum nf_dev_hooks { ++ NF_NETDEV_INGRESS = 0, ++ NF_NETDEV_NUMHOOKS = 1, ++}; ++ ++struct ifbond { ++ __s32 bond_mode; ++ __s32 num_slaves; ++ __s32 miimon; ++}; ++ ++typedef struct ifbond ifbond; ++ ++struct ifslave { ++ __s32 slave_id; ++ char slave_name[16]; ++ __s8 link; ++ __s8 state; ++ __u32 link_failure_count; ++}; ++ ++typedef struct ifslave ifslave; ++ ++struct netdev_boot_setup { ++ char name[16]; ++ struct ifmap map; ++}; ++ ++enum { ++ NAPIF_STATE_SCHED = 1, ++ NAPIF_STATE_MISSED = 2, ++ NAPIF_STATE_DISABLE = 4, ++ NAPIF_STATE_NPSVC = 8, ++ NAPIF_STATE_HASHED = 16, ++ NAPIF_STATE_NO_BUSY_POLL = 32, ++ NAPIF_STATE_IN_BUSY_POLL = 64, ++}; ++ ++enum gro_result { ++ GRO_MERGED = 0, ++ GRO_MERGED_FREE = 1, ++ GRO_HELD = 2, ++ GRO_NORMAL = 3, ++ GRO_DROP = 4, ++ GRO_CONSUMED = 5, ++}; ++ ++typedef enum gro_result gro_result_t; ++ ++struct netpoll; ++ ++struct netpoll_info { ++ refcount_t refcnt; ++ struct semaphore dev_lock; ++ struct sk_buff_head txq; ++ struct delayed_work tx_work; ++ struct netpoll *netpoll; ++ struct callback_head rcu; ++}; ++ ++struct udp_tunnel_info { ++ short unsigned int type; ++ sa_family_t sa_family; ++ __be16 port; ++}; ++ ++struct packet_type { ++ __be16 type; ++ struct net_device *dev; ++ int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); ++ void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); ++ bool (*id_match)(struct packet_type *, struct sock *); ++ void *af_packet_priv; ++ struct list_head list; ++ long unsigned int kabi_reserved1; ++ long unsigned int kabi_reserved2; ++ long unsigned int kabi_reserved3; ++ long unsigned int kabi_reserved4; ++}; ++ ++struct packet_offload { ++ __be16 type; ++ u16 priority; ++ struct offload_callbacks callbacks; ++ struct list_head list; ++}; ++ ++struct 
netdev_notifier_info_ext { ++ struct netdev_notifier_info info; ++ union { ++ u32 mtu; ++ } ext; ++}; ++ ++struct netdev_notifier_change_info { ++ struct netdev_notifier_info info; ++ unsigned int flags_changed; ++}; ++ ++struct netdev_notifier_changeupper_info { ++ struct netdev_notifier_info info; ++ struct net_device *upper_dev; ++ bool master; ++ bool linking; ++ void *upper_info; ++}; ++ ++struct netdev_notifier_changelowerstate_info { ++ struct netdev_notifier_info info; ++ void *lower_state_info; ++}; ++ ++typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); ++ ++struct netdev_bonding_info { ++ ifslave slave; ++ ifbond master; ++}; ++ ++struct netdev_notifier_bonding_info { ++ struct netdev_notifier_info info; ++ struct netdev_bonding_info bonding_info; ++}; ++ ++union inet_addr { ++ __u32 all[4]; ++ __be32 ip; ++ __be32 ip6[4]; ++ struct in_addr in; ++ struct in6_addr in6; ++}; ++ ++struct netpoll { ++ struct net_device *dev; ++ char dev_name[16]; ++ const char *name; ++ union inet_addr local_ip; ++ union inet_addr remote_ip; ++ bool ipv6; ++ u16 local_port; ++ u16 remote_port; ++ u8 remote_mac[6]; ++ struct work_struct cleanup_work; ++}; ++ ++enum qdisc_state_t { ++ __QDISC_STATE_SCHED = 0, ++ __QDISC_STATE_DEACTIVATED = 1, ++}; ++ ++struct tcf_walker { ++ int stop; ++ int skip; ++ int count; ++ long unsigned int cookie; ++ int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); ++}; ++ ++enum { ++ IPV4_DEVCONF_FORWARDING = 1, ++ IPV4_DEVCONF_MC_FORWARDING = 2, ++ IPV4_DEVCONF_PROXY_ARP = 3, ++ IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, ++ IPV4_DEVCONF_SECURE_REDIRECTS = 5, ++ IPV4_DEVCONF_SEND_REDIRECTS = 6, ++ IPV4_DEVCONF_SHARED_MEDIA = 7, ++ IPV4_DEVCONF_RP_FILTER = 8, ++ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, ++ IPV4_DEVCONF_BOOTP_RELAY = 10, ++ IPV4_DEVCONF_LOG_MARTIANS = 11, ++ IPV4_DEVCONF_TAG = 12, ++ IPV4_DEVCONF_ARPFILTER = 13, ++ IPV4_DEVCONF_MEDIUM_ID = 14, ++ IPV4_DEVCONF_NOXFRM = 15, ++ IPV4_DEVCONF_NOPOLICY = 16, ++ IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, ++ IPV4_DEVCONF_ARP_ANNOUNCE = 18, ++ IPV4_DEVCONF_ARP_IGNORE = 19, ++ IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, ++ IPV4_DEVCONF_ARP_ACCEPT = 21, ++ IPV4_DEVCONF_ARP_NOTIFY = 22, ++ IPV4_DEVCONF_ACCEPT_LOCAL = 23, ++ IPV4_DEVCONF_SRC_VMARK = 24, ++ IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, ++ IPV4_DEVCONF_ROUTE_LOCALNET = 26, ++ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, ++ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, ++ IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, ++ IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, ++ IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, ++ IPV4_DEVCONF_BC_FORWARDING = 32, ++ __IPV4_DEVCONF_MAX = 33, ++}; ++ ++struct dev_kfree_skb_cb { ++ enum skb_free_reason reason; ++}; ++ ++struct netdev_adjacent { ++ struct net_device *dev; ++ bool master; ++ u16 ref_nr; ++ void *private; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct ethtool_value { ++ __u32 cmd; ++ __u32 data; ++}; ++ ++enum tunable_id { ++ ETHTOOL_ID_UNSPEC = 0, ++ ETHTOOL_RX_COPYBREAK = 1, ++ ETHTOOL_TX_COPYBREAK = 2, ++ ETHTOOL_PFC_PREVENTION_TOUT = 3, ++ __ETHTOOL_TUNABLE_COUNT = 4, ++}; ++ ++enum tunable_type_id { ++ ETHTOOL_TUNABLE_UNSPEC = 0, ++ ETHTOOL_TUNABLE_U8 = 1, ++ ETHTOOL_TUNABLE_U16 = 2, ++ ETHTOOL_TUNABLE_U32 = 3, ++ ETHTOOL_TUNABLE_U64 = 4, ++ ETHTOOL_TUNABLE_STRING = 5, ++ ETHTOOL_TUNABLE_S8 = 6, ++ ETHTOOL_TUNABLE_S16 = 7, ++ ETHTOOL_TUNABLE_S32 = 8, ++ ETHTOOL_TUNABLE_S64 = 9, ++}; ++ ++enum phy_tunable_id { ++ ETHTOOL_PHY_ID_UNSPEC = 0, ++ ETHTOOL_PHY_DOWNSHIFT = 1, ++ 
__ETHTOOL_PHY_TUNABLE_COUNT = 2, ++}; ++ ++struct ethtool_gstrings { ++ __u32 cmd; ++ __u32 string_set; ++ __u32 len; ++ __u8 data[0]; ++}; ++ ++struct ethtool_sset_info { ++ __u32 cmd; ++ __u32 reserved; ++ __u64 sset_mask; ++ __u32 data[0]; ++}; ++ ++struct ethtool_perm_addr { ++ __u32 cmd; ++ __u32 size; ++ __u8 data[0]; ++}; ++ ++enum ethtool_flags { ++ ETH_FLAG_TXVLAN = 128, ++ ETH_FLAG_RXVLAN = 256, ++ ETH_FLAG_LRO = 32768, ++ ETH_FLAG_NTUPLE = 134217728, ++ ETH_FLAG_RXHASH = 268435456, ++}; ++ ++struct ethtool_rxfh { ++ __u32 cmd; ++ __u32 rss_context; ++ __u32 indir_size; ++ __u32 key_size; ++ __u8 hfunc; ++ __u8 rsvd8[3]; ++ __u32 rsvd32; ++ __u32 rss_config[0]; ++}; ++ ++struct ethtool_get_features_block { ++ __u32 available; ++ __u32 requested; ++ __u32 active; ++ __u32 never_changed; ++}; ++ ++struct ethtool_gfeatures { ++ __u32 cmd; ++ __u32 size; ++ struct ethtool_get_features_block features[0]; ++}; ++ ++struct ethtool_set_features_block { ++ __u32 valid; ++ __u32 requested; ++}; ++ ++struct ethtool_sfeatures { ++ __u32 cmd; ++ __u32 size; ++ struct ethtool_set_features_block features[0]; ++}; ++ ++enum ethtool_sfeatures_retval_bits { ++ ETHTOOL_F_UNSUPPORTED__BIT = 0, ++ ETHTOOL_F_WISH__BIT = 1, ++ ETHTOOL_F_COMPAT__BIT = 2, ++}; ++ ++struct ethtool_per_queue_op { ++ __u32 cmd; ++ __u32 sub_command; ++ __u32 queue_mask[128]; ++ char data[0]; ++}; ++ ++enum { ++ ETH_RSS_HASH_TOP_BIT = 0, ++ ETH_RSS_HASH_XOR_BIT = 1, ++ ETH_RSS_HASH_CRC32_BIT = 2, ++ ETH_RSS_HASH_FUNCS_COUNT = 3, ++}; ++ ++struct ethtool_link_usettings { ++ struct ethtool_link_settings base; ++ struct { ++ __u32 supported[2]; ++ __u32 advertising[2]; ++ __u32 lp_advertising[2]; ++ } link_modes; ++}; ++ ++struct netdev_hw_addr { ++ struct list_head list; ++ unsigned char addr[32]; ++ unsigned char type; ++ bool global_use; ++ int sync_cnt; ++ int refcount; ++ int synced; ++ struct callback_head callback_head; ++}; ++ ++enum { ++ NDA_UNSPEC = 0, ++ NDA_DST = 1, ++ NDA_LLADDR = 2, ++ NDA_CACHEINFO = 3, ++ NDA_PROBES = 4, ++ NDA_VLAN = 5, ++ NDA_PORT = 6, ++ NDA_VNI = 7, ++ NDA_IFINDEX = 8, ++ NDA_MASTER = 9, ++ NDA_LINK_NETNSID = 10, ++ NDA_SRC_VNI = 11, ++ __NDA_MAX = 12, ++}; ++ ++struct nda_cacheinfo { ++ __u32 ndm_confirmed; ++ __u32 ndm_used; ++ __u32 ndm_updated; ++ __u32 ndm_refcnt; ++}; ++ ++struct ndt_stats { ++ __u64 ndts_allocs; ++ __u64 ndts_destroys; ++ __u64 ndts_hash_grows; ++ __u64 ndts_res_failed; ++ __u64 ndts_lookups; ++ __u64 ndts_hits; ++ __u64 ndts_rcv_probes_mcast; ++ __u64 ndts_rcv_probes_ucast; ++ __u64 ndts_periodic_gc_runs; ++ __u64 ndts_forced_gc_runs; ++ __u64 ndts_table_fulls; ++}; ++ ++enum { ++ NDTPA_UNSPEC = 0, ++ NDTPA_IFINDEX = 1, ++ NDTPA_REFCNT = 2, ++ NDTPA_REACHABLE_TIME = 3, ++ NDTPA_BASE_REACHABLE_TIME = 4, ++ NDTPA_RETRANS_TIME = 5, ++ NDTPA_GC_STALETIME = 6, ++ NDTPA_DELAY_PROBE_TIME = 7, ++ NDTPA_QUEUE_LEN = 8, ++ NDTPA_APP_PROBES = 9, ++ NDTPA_UCAST_PROBES = 10, ++ NDTPA_MCAST_PROBES = 11, ++ NDTPA_ANYCAST_DELAY = 12, ++ NDTPA_PROXY_DELAY = 13, ++ NDTPA_PROXY_QLEN = 14, ++ NDTPA_LOCKTIME = 15, ++ NDTPA_QUEUE_LENBYTES = 16, ++ NDTPA_MCAST_REPROBES = 17, ++ NDTPA_PAD = 18, ++ __NDTPA_MAX = 19, ++}; ++ ++struct ndtmsg { ++ __u8 ndtm_family; ++ __u8 ndtm_pad1; ++ __u16 ndtm_pad2; ++}; ++ ++struct ndt_config { ++ __u16 ndtc_key_len; ++ __u16 ndtc_entry_size; ++ __u32 ndtc_entries; ++ __u32 ndtc_last_flush; ++ __u32 ndtc_last_rand; ++ __u32 ndtc_hash_rnd; ++ __u32 ndtc_hash_mask; ++ __u32 ndtc_hash_chain_gc; ++ __u32 ndtc_proxy_qlen; ++}; ++ ++enum { ++ NDTA_UNSPEC = 0, ++ 
NDTA_NAME = 1, ++ NDTA_THRESH1 = 2, ++ NDTA_THRESH2 = 3, ++ NDTA_THRESH3 = 4, ++ NDTA_CONFIG = 5, ++ NDTA_PARMS = 6, ++ NDTA_STATS = 7, ++ NDTA_GC_INTERVAL = 8, ++ NDTA_PAD = 9, ++ __NDTA_MAX = 10, ++}; ++ ++enum { ++ NEIGH_ARP_TABLE = 0, ++ NEIGH_ND_TABLE = 1, ++ NEIGH_DN_TABLE = 2, ++ NEIGH_NR_TABLES = 3, ++ NEIGH_LINK_TABLE = 3, ++}; ++ ++struct neigh_seq_state { ++ struct seq_net_private p; ++ struct neigh_table *tbl; ++ struct neigh_hash_table *nht; ++ void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); ++ unsigned int bucket; ++ unsigned int flags; ++}; ++ ++struct neighbour_cb { ++ long unsigned int sched_next; ++ unsigned int flags; ++}; ++ ++enum netevent_notif_type { ++ NETEVENT_NEIGH_UPDATE = 1, ++ NETEVENT_REDIRECT = 2, ++ NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, ++ NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, ++ NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, ++ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, ++}; ++ ++struct neigh_sysctl_table { ++ struct ctl_table_header *sysctl_header; ++ struct ctl_table neigh_vars[21]; ++}; ++ ++struct netlink_dump_control { ++ int (*start)(struct netlink_callback *); ++ int (*dump)(struct sk_buff *, struct netlink_callback *); ++ int (*done)(struct netlink_callback *); ++ void *data; ++ struct module *module; ++ u16 min_dump_alloc; ++}; ++ ++struct rtnl_link_stats { ++ __u32 rx_packets; ++ __u32 tx_packets; ++ __u32 rx_bytes; ++ __u32 tx_bytes; ++ __u32 rx_errors; ++ __u32 tx_errors; ++ __u32 rx_dropped; ++ __u32 tx_dropped; ++ __u32 multicast; ++ __u32 collisions; ++ __u32 rx_length_errors; ++ __u32 rx_over_errors; ++ __u32 rx_crc_errors; ++ __u32 rx_frame_errors; ++ __u32 rx_fifo_errors; ++ __u32 rx_missed_errors; ++ __u32 tx_aborted_errors; ++ __u32 tx_carrier_errors; ++ __u32 tx_fifo_errors; ++ __u32 tx_heartbeat_errors; ++ __u32 tx_window_errors; ++ __u32 rx_compressed; ++ __u32 tx_compressed; ++ __u32 rx_nohandler; ++}; ++ ++struct rtnl_link_ifmap { ++ __u64 mem_start; ++ __u64 mem_end; ++ __u64 base_addr; ++ __u16 irq; ++ __u8 dma; ++ __u8 port; ++}; ++ ++enum { ++ IFLA_UNSPEC = 0, ++ IFLA_ADDRESS = 1, ++ IFLA_BROADCAST = 2, ++ IFLA_IFNAME = 3, ++ IFLA_MTU = 4, ++ IFLA_LINK = 5, ++ IFLA_QDISC = 6, ++ IFLA_STATS = 7, ++ IFLA_COST = 8, ++ IFLA_PRIORITY = 9, ++ IFLA_MASTER = 10, ++ IFLA_WIRELESS = 11, ++ IFLA_PROTINFO = 12, ++ IFLA_TXQLEN = 13, ++ IFLA_MAP = 14, ++ IFLA_WEIGHT = 15, ++ IFLA_OPERSTATE = 16, ++ IFLA_LINKMODE = 17, ++ IFLA_LINKINFO = 18, ++ IFLA_NET_NS_PID = 19, ++ IFLA_IFALIAS = 20, ++ IFLA_NUM_VF = 21, ++ IFLA_VFINFO_LIST = 22, ++ IFLA_STATS64 = 23, ++ IFLA_VF_PORTS = 24, ++ IFLA_PORT_SELF = 25, ++ IFLA_AF_SPEC = 26, ++ IFLA_GROUP = 27, ++ IFLA_NET_NS_FD = 28, ++ IFLA_EXT_MASK = 29, ++ IFLA_PROMISCUITY = 30, ++ IFLA_NUM_TX_QUEUES = 31, ++ IFLA_NUM_RX_QUEUES = 32, ++ IFLA_CARRIER = 33, ++ IFLA_PHYS_PORT_ID = 34, ++ IFLA_CARRIER_CHANGES = 35, ++ IFLA_PHYS_SWITCH_ID = 36, ++ IFLA_LINK_NETNSID = 37, ++ IFLA_PHYS_PORT_NAME = 38, ++ IFLA_PROTO_DOWN = 39, ++ IFLA_GSO_MAX_SEGS = 40, ++ IFLA_GSO_MAX_SIZE = 41, ++ IFLA_PAD = 42, ++ IFLA_XDP = 43, ++ IFLA_EVENT = 44, ++ IFLA_NEW_NETNSID = 45, ++ IFLA_IF_NETNSID = 46, ++ IFLA_CARRIER_UP_COUNT = 47, ++ IFLA_CARRIER_DOWN_COUNT = 48, ++ IFLA_NEW_IFINDEX = 49, ++ IFLA_MIN_MTU = 50, ++ IFLA_MAX_MTU = 51, ++ __IFLA_MAX = 52, ++}; ++ ++enum { ++ IFLA_BRPORT_UNSPEC = 0, ++ IFLA_BRPORT_STATE = 1, ++ IFLA_BRPORT_PRIORITY = 2, ++ IFLA_BRPORT_COST = 3, ++ IFLA_BRPORT_MODE = 4, ++ IFLA_BRPORT_GUARD = 5, ++ IFLA_BRPORT_PROTECT = 6, ++ IFLA_BRPORT_FAST_LEAVE = 7, ++ IFLA_BRPORT_LEARNING = 
8, ++ IFLA_BRPORT_UNICAST_FLOOD = 9, ++ IFLA_BRPORT_PROXYARP = 10, ++ IFLA_BRPORT_LEARNING_SYNC = 11, ++ IFLA_BRPORT_PROXYARP_WIFI = 12, ++ IFLA_BRPORT_ROOT_ID = 13, ++ IFLA_BRPORT_BRIDGE_ID = 14, ++ IFLA_BRPORT_DESIGNATED_PORT = 15, ++ IFLA_BRPORT_DESIGNATED_COST = 16, ++ IFLA_BRPORT_ID = 17, ++ IFLA_BRPORT_NO = 18, ++ IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, ++ IFLA_BRPORT_CONFIG_PENDING = 20, ++ IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, ++ IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, ++ IFLA_BRPORT_HOLD_TIMER = 23, ++ IFLA_BRPORT_FLUSH = 24, ++ IFLA_BRPORT_MULTICAST_ROUTER = 25, ++ IFLA_BRPORT_PAD = 26, ++ IFLA_BRPORT_MCAST_FLOOD = 27, ++ IFLA_BRPORT_MCAST_TO_UCAST = 28, ++ IFLA_BRPORT_VLAN_TUNNEL = 29, ++ IFLA_BRPORT_BCAST_FLOOD = 30, ++ IFLA_BRPORT_GROUP_FWD_MASK = 31, ++ IFLA_BRPORT_NEIGH_SUPPRESS = 32, ++ IFLA_BRPORT_ISOLATED = 33, ++ IFLA_BRPORT_BACKUP_PORT = 34, ++ __IFLA_BRPORT_MAX = 35, ++}; ++ ++enum { ++ IFLA_INFO_UNSPEC = 0, ++ IFLA_INFO_KIND = 1, ++ IFLA_INFO_DATA = 2, ++ IFLA_INFO_XSTATS = 3, ++ IFLA_INFO_SLAVE_KIND = 4, ++ IFLA_INFO_SLAVE_DATA = 5, ++ __IFLA_INFO_MAX = 6, ++}; ++ ++enum { ++ IFLA_VF_INFO_UNSPEC = 0, ++ IFLA_VF_INFO = 1, ++ __IFLA_VF_INFO_MAX = 2, ++}; ++ ++enum { ++ IFLA_VF_UNSPEC = 0, ++ IFLA_VF_MAC = 1, ++ IFLA_VF_VLAN = 2, ++ IFLA_VF_TX_RATE = 3, ++ IFLA_VF_SPOOFCHK = 4, ++ IFLA_VF_LINK_STATE = 5, ++ IFLA_VF_RATE = 6, ++ IFLA_VF_RSS_QUERY_EN = 7, ++ IFLA_VF_STATS = 8, ++ IFLA_VF_TRUST = 9, ++ IFLA_VF_IB_NODE_GUID = 10, ++ IFLA_VF_IB_PORT_GUID = 11, ++ IFLA_VF_VLAN_LIST = 12, ++ __IFLA_VF_MAX = 13, ++}; ++ ++struct ifla_vf_mac { ++ __u32 vf; ++ __u8 mac[32]; ++}; ++ ++struct ifla_vf_vlan { ++ __u32 vf; ++ __u32 vlan; ++ __u32 qos; ++}; ++ ++enum { ++ IFLA_VF_VLAN_INFO_UNSPEC = 0, ++ IFLA_VF_VLAN_INFO = 1, ++ __IFLA_VF_VLAN_INFO_MAX = 2, ++}; ++ ++struct ifla_vf_vlan_info { ++ __u32 vf; ++ __u32 vlan; ++ __u32 qos; ++ __be16 vlan_proto; ++}; ++ ++struct ifla_vf_tx_rate { ++ __u32 vf; ++ __u32 rate; ++}; ++ ++struct ifla_vf_rate { ++ __u32 vf; ++ __u32 min_tx_rate; ++ __u32 max_tx_rate; ++}; ++ ++struct ifla_vf_spoofchk { ++ __u32 vf; ++ __u32 setting; ++}; ++ ++struct ifla_vf_guid { ++ __u32 vf; ++ __u64 guid; ++}; ++ ++struct ifla_vf_link_state { ++ __u32 vf; ++ __u32 link_state; ++}; ++ ++struct ifla_vf_rss_query_en { ++ __u32 vf; ++ __u32 setting; ++}; ++ ++enum { ++ IFLA_VF_STATS_RX_PACKETS = 0, ++ IFLA_VF_STATS_TX_PACKETS = 1, ++ IFLA_VF_STATS_RX_BYTES = 2, ++ IFLA_VF_STATS_TX_BYTES = 3, ++ IFLA_VF_STATS_BROADCAST = 4, ++ IFLA_VF_STATS_MULTICAST = 5, ++ IFLA_VF_STATS_PAD = 6, ++ IFLA_VF_STATS_RX_DROPPED = 7, ++ IFLA_VF_STATS_TX_DROPPED = 8, ++ __IFLA_VF_STATS_MAX = 9, ++}; ++ ++struct ifla_vf_trust { ++ __u32 vf; ++ __u32 setting; ++}; ++ ++enum { ++ IFLA_VF_PORT_UNSPEC = 0, ++ IFLA_VF_PORT = 1, ++ __IFLA_VF_PORT_MAX = 2, ++}; ++ ++enum { ++ IFLA_PORT_UNSPEC = 0, ++ IFLA_PORT_VF = 1, ++ IFLA_PORT_PROFILE = 2, ++ IFLA_PORT_VSI_TYPE = 3, ++ IFLA_PORT_INSTANCE_UUID = 4, ++ IFLA_PORT_HOST_UUID = 5, ++ IFLA_PORT_REQUEST = 6, ++ IFLA_PORT_RESPONSE = 7, ++ __IFLA_PORT_MAX = 8, ++}; ++ ++struct if_stats_msg { ++ __u8 family; ++ __u8 pad1; ++ __u16 pad2; ++ __u32 ifindex; ++ __u32 filter_mask; ++}; ++ ++enum { ++ IFLA_STATS_UNSPEC = 0, ++ IFLA_STATS_LINK_64 = 1, ++ IFLA_STATS_LINK_XSTATS = 2, ++ IFLA_STATS_LINK_XSTATS_SLAVE = 3, ++ IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, ++ IFLA_STATS_AF_SPEC = 5, ++ __IFLA_STATS_MAX = 6, ++}; ++ ++enum { ++ IFLA_OFFLOAD_XSTATS_UNSPEC = 0, ++ IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, ++ __IFLA_OFFLOAD_XSTATS_MAX = 2, ++}; ++ ++enum { ++ XDP_ATTACHED_NONE 
= 0, ++ XDP_ATTACHED_DRV = 1, ++ XDP_ATTACHED_SKB = 2, ++ XDP_ATTACHED_HW = 3, ++ XDP_ATTACHED_MULTI = 4, ++}; ++ ++enum { ++ IFLA_XDP_UNSPEC = 0, ++ IFLA_XDP_FD = 1, ++ IFLA_XDP_ATTACHED = 2, ++ IFLA_XDP_FLAGS = 3, ++ IFLA_XDP_PROG_ID = 4, ++ IFLA_XDP_DRV_PROG_ID = 5, ++ IFLA_XDP_SKB_PROG_ID = 6, ++ IFLA_XDP_HW_PROG_ID = 7, ++ __IFLA_XDP_MAX = 8, ++}; ++ ++enum { ++ IFLA_EVENT_NONE = 0, ++ IFLA_EVENT_REBOOT = 1, ++ IFLA_EVENT_FEATURES = 2, ++ IFLA_EVENT_BONDING_FAILOVER = 3, ++ IFLA_EVENT_NOTIFY_PEERS = 4, ++ IFLA_EVENT_IGMP_RESEND = 5, ++ IFLA_EVENT_BONDING_OPTIONS = 6, ++}; ++ ++enum { ++ IFLA_BRIDGE_FLAGS = 0, ++ IFLA_BRIDGE_MODE = 1, ++ IFLA_BRIDGE_VLAN_INFO = 2, ++ IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, ++ __IFLA_BRIDGE_MAX = 4, ++}; ++ ++enum { ++ BR_MCAST_DIR_RX = 0, ++ BR_MCAST_DIR_TX = 1, ++ BR_MCAST_DIR_SIZE = 2, ++}; ++ ++enum rtattr_type_t { ++ RTA_UNSPEC = 0, ++ RTA_DST = 1, ++ RTA_SRC = 2, ++ RTA_IIF = 3, ++ RTA_OIF = 4, ++ RTA_GATEWAY = 5, ++ RTA_PRIORITY = 6, ++ RTA_PREFSRC = 7, ++ RTA_METRICS = 8, ++ RTA_MULTIPATH = 9, ++ RTA_PROTOINFO = 10, ++ RTA_FLOW = 11, ++ RTA_CACHEINFO = 12, ++ RTA_SESSION = 13, ++ RTA_MP_ALGO = 14, ++ RTA_TABLE = 15, ++ RTA_MARK = 16, ++ RTA_MFC_STATS = 17, ++ RTA_VIA = 18, ++ RTA_NEWDST = 19, ++ RTA_PREF = 20, ++ RTA_ENCAP_TYPE = 21, ++ RTA_ENCAP = 22, ++ RTA_EXPIRES = 23, ++ RTA_PAD = 24, ++ RTA_UID = 25, ++ RTA_TTL_PROPAGATE = 26, ++ RTA_IP_PROTO = 27, ++ RTA_SPORT = 28, ++ RTA_DPORT = 29, ++ __RTA_MAX = 30, ++}; ++ ++struct rta_cacheinfo { ++ __u32 rta_clntref; ++ __u32 rta_lastuse; ++ __s32 rta_expires; ++ __u32 rta_error; ++ __u32 rta_used; ++ __u32 rta_id; ++ __u32 rta_ts; ++ __u32 rta_tsage; ++}; ++ ++struct ifinfomsg { ++ unsigned char ifi_family; ++ unsigned char __ifi_pad; ++ short unsigned int ifi_type; ++ int ifi_index; ++ unsigned int ifi_flags; ++ unsigned int ifi_change; ++}; ++ ++typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); ++ ++typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); ++ ++struct rtnl_af_ops { ++ struct list_head list; ++ int family; ++ int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); ++ size_t (*get_link_af_size)(const struct net_device *, u32); ++ int (*validate_link_af)(const struct net_device *, const struct nlattr *); ++ int (*set_link_af)(struct net_device *, const struct nlattr *); ++ int (*fill_stats_af)(struct sk_buff *, const struct net_device *); ++ size_t (*get_stats_af_size)(const struct net_device *); ++}; ++ ++struct rtnl_link { ++ rtnl_doit_func doit; ++ rtnl_dumpit_func dumpit; ++ struct module *owner; ++ unsigned int flags; ++ struct callback_head rcu; ++}; ++ ++enum { ++ IF_LINK_MODE_DEFAULT = 0, ++ IF_LINK_MODE_DORMANT = 1, ++}; ++ ++enum lw_bits { ++ LW_URGENT = 0, ++}; ++ ++struct seg6_pernet_data { ++ struct mutex lock; ++ struct in6_addr *tun_src; ++}; ++ ++enum bpf_adj_room_mode { ++ BPF_ADJ_ROOM_NET = 0, ++}; ++ ++enum bpf_hdr_start_off { ++ BPF_HDR_START_MAC = 0, ++ BPF_HDR_START_NET = 1, ++}; ++ ++struct bpf_tunnel_key { ++ __u32 tunnel_id; ++ union { ++ __u32 remote_ipv4; ++ __u32 remote_ipv6[4]; ++ }; ++ __u8 tunnel_tos; ++ __u8 tunnel_ttl; ++ __u16 tunnel_ext; ++ __u32 tunnel_label; ++}; ++ ++struct bpf_xfrm_state { ++ __u32 reqid; ++ __u32 spi; ++ __u16 family; ++ __u16 ext; ++ union { ++ __u32 remote_ipv4; ++ __u32 remote_ipv6[4]; ++ }; ++}; ++ ++struct bpf_sock { ++ __u32 bound_dev_if; ++ __u32 family; ++ __u32 type; ++ __u32 protocol; ++ __u32 mark; ++ __u32 priority; ++ __u32 src_ip4; ++ __u32 
src_ip6[4]; ++ __u32 src_port; ++}; ++ ++enum sk_action { ++ SK_DROP = 0, ++ SK_PASS = 1, ++}; ++ ++struct sk_reuseport_md { ++ void *data; ++ void *data_end; ++ __u32 len; ++ __u32 eth_protocol; ++ __u32 ip_protocol; ++ __u32 bind_inany; ++ __u32 hash; ++}; ++ ++struct bpf_sock_addr { ++ __u32 user_family; ++ __u32 user_ip4; ++ __u32 user_ip6[4]; ++ __u32 user_port; ++ __u32 family; ++ __u32 type; ++ __u32 protocol; ++ __u32 msg_src_ip4; ++ __u32 msg_src_ip6[4]; ++}; ++ ++struct bpf_sock_ops { ++ __u32 op; ++ union { ++ __u32 args[4]; ++ __u32 reply; ++ __u32 replylong[4]; ++ }; ++ __u32 family; ++ __u32 remote_ip4; ++ __u32 local_ip4; ++ __u32 remote_ip6[4]; ++ __u32 local_ip6[4]; ++ __u32 remote_port; ++ __u32 local_port; ++ __u32 is_fullsock; ++ __u32 snd_cwnd; ++ __u32 srtt_us; ++ __u32 bpf_sock_ops_cb_flags; ++ __u32 state; ++ __u32 rtt_min; ++ __u32 snd_ssthresh; ++ __u32 rcv_nxt; ++ __u32 snd_nxt; ++ __u32 snd_una; ++ __u32 mss_cache; ++ __u32 ecn_flags; ++ __u32 rate_delivered; ++ __u32 rate_interval_us; ++ __u32 packets_out; ++ __u32 retrans_out; ++ __u32 total_retrans; ++ __u32 segs_in; ++ __u32 data_segs_in; ++ __u32 segs_out; ++ __u32 data_segs_out; ++ __u32 lost_out; ++ __u32 sacked_out; ++ __u32 sk_txhash; ++ __u64 bytes_received; ++ __u64 bytes_acked; ++}; ++ ++enum { ++ BPF_SOCK_OPS_VOID = 0, ++ BPF_SOCK_OPS_TIMEOUT_INIT = 1, ++ BPF_SOCK_OPS_RWND_INIT = 2, ++ BPF_SOCK_OPS_TCP_CONNECT_CB = 3, ++ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, ++ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, ++ BPF_SOCK_OPS_NEEDS_ECN = 6, ++ BPF_SOCK_OPS_BASE_RTT = 7, ++ BPF_SOCK_OPS_RTO_CB = 8, ++ BPF_SOCK_OPS_RETRANS_CB = 9, ++ BPF_SOCK_OPS_STATE_CB = 10, ++ BPF_SOCK_OPS_TCP_LISTEN_CB = 11, ++}; ++ ++enum { ++ BPF_FIB_LKUP_RET_SUCCESS = 0, ++ BPF_FIB_LKUP_RET_BLACKHOLE = 1, ++ BPF_FIB_LKUP_RET_UNREACHABLE = 2, ++ BPF_FIB_LKUP_RET_PROHIBIT = 3, ++ BPF_FIB_LKUP_RET_NOT_FWDED = 4, ++ BPF_FIB_LKUP_RET_FWD_DISABLED = 5, ++ BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, ++ BPF_FIB_LKUP_RET_NO_NEIGH = 7, ++ BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, ++}; ++ ++struct bpf_fib_lookup { ++ __u8 family; ++ __u8 l4_protocol; ++ __be16 sport; ++ __be16 dport; ++ __u16 tot_len; ++ __u32 ifindex; ++ union { ++ __u8 tos; ++ __be32 flowinfo; ++ __u32 rt_metric; ++ }; ++ union { ++ __be32 ipv4_src; ++ __u32 ipv6_src[4]; ++ }; ++ union { ++ __be32 ipv4_dst; ++ __u32 ipv6_dst[4]; ++ }; ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __u8 smac[6]; ++ __u8 dmac[6]; ++}; ++ ++struct xsk_queue; ++ ++struct xdp_umem_props { ++ u64 chunk_mask; ++ u64 size; ++}; ++ ++struct xdp_umem_page; ++ ++struct xdp_umem { ++ struct xsk_queue *fq; ++ struct xsk_queue *cq; ++ struct xdp_umem_page *pages; ++ struct xdp_umem_props props; ++ u32 headroom; ++ u32 chunk_size_nohr; ++ struct user_struct *user; ++ long unsigned int address; ++ refcount_t users; ++ struct work_struct work; ++ struct page **pgs; ++ u32 npgs; ++ struct net_device *dev; ++ u16 queue_id; ++ bool zc; ++ spinlock_t xsk_list_lock; ++ struct list_head xsk_list; ++}; ++ ++enum rt_class_t { ++ RT_TABLE_UNSPEC = 0, ++ RT_TABLE_COMPAT = 252, ++ RT_TABLE_DEFAULT = 253, ++ RT_TABLE_MAIN = 254, ++ RT_TABLE_LOCAL = 255, ++ RT_TABLE_MAX = 4294967295, ++}; ++ ++struct bpf_skb_data_end { ++ struct qdisc_skb_cb qdisc_cb; ++ void *data_meta; ++ void *data_end; ++}; ++ ++struct sk_msg_buff { ++ void *data; ++ void *data_end; ++ __u32 apply_bytes; ++ __u32 cork_bytes; ++ int sg_copybreak; ++ int sg_start; ++ int sg_curr; ++ int sg_end; ++ struct scatterlist sg_data[16]; ++ bool sg_copy[16]; ++ __u32 flags; ++ 
struct sock *sk_redir; ++ struct sock *sk; ++ struct sk_buff *skb; ++ struct list_head list; ++}; ++ ++typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); ++ ++struct fib_nh_exception { ++ struct fib_nh_exception *fnhe_next; ++ int fnhe_genid; ++ __be32 fnhe_daddr; ++ u32 fnhe_pmtu; ++ bool fnhe_mtu_locked; ++ __be32 fnhe_gw; ++ long unsigned int fnhe_expires; ++ struct rtable *fnhe_rth_input; ++ struct rtable *fnhe_rth_output; ++ long unsigned int fnhe_stamp; ++ struct callback_head rcu; ++}; ++ ++struct fnhe_hash_bucket { ++ struct fib_nh_exception *chain; ++}; ++ ++struct fib_info; ++ ++struct fib_nh { ++ struct net_device *nh_dev; ++ struct hlist_node nh_hash; ++ struct fib_info *nh_parent; ++ unsigned int nh_flags; ++ unsigned char nh_scope; ++ int nh_weight; ++ atomic_t nh_upper_bound; ++ __u32 nh_tclassid; ++ int nh_oif; ++ __be32 nh_gw; ++ __be32 nh_saddr; ++ int nh_saddr_genid; ++ struct rtable **nh_pcpu_rth_output; ++ struct rtable *nh_rth_input; ++ struct fnhe_hash_bucket *nh_exceptions; ++ struct lwtunnel_state *nh_lwtstate; ++}; ++ ++struct fib_info { ++ struct hlist_node fib_hash; ++ struct hlist_node fib_lhash; ++ struct net *fib_net; ++ int fib_treeref; ++ refcount_t fib_clntref; ++ unsigned int fib_flags; ++ unsigned char fib_dead; ++ unsigned char fib_protocol; ++ unsigned char fib_scope; ++ unsigned char fib_type; ++ __be32 fib_prefsrc; ++ u32 fib_tb_id; ++ u32 fib_priority; ++ struct dst_metrics *fib_metrics; ++ int fib_nhs; ++ struct callback_head rcu; ++ struct fib_nh fib_nh[0]; ++}; ++ ++struct fib_result { ++ __be32 prefix; ++ unsigned char prefixlen; ++ unsigned char nh_sel; ++ unsigned char type; ++ unsigned char scope; ++ u32 tclassid; ++ struct fib_info *fi; ++ struct fib_table *table; ++ struct hlist_head *fa_head; ++}; ++ ++struct tcp_skb_cb { ++ __u32 seq; ++ __u32 end_seq; ++ union { ++ __u32 tcp_tw_isn; ++ struct { ++ u16 tcp_gso_segs; ++ u16 tcp_gso_size; ++ }; ++ }; ++ __u8 tcp_flags; ++ __u8 sacked; ++ __u8 ip_dsfield; ++ __u8 txstamp_ack: 1; ++ __u8 eor: 1; ++ __u8 has_rxtstamp: 1; ++ __u8 unused: 5; ++ __u32 ack_seq; ++ union { ++ struct { ++ __u32 in_flight: 30; ++ __u32 is_app_limited: 1; ++ __u32 unused: 1; ++ __u32 delivered; ++ u64 first_tx_mstamp; ++ u64 delivered_mstamp; ++ } tx; ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ } header; ++ struct { ++ __u32 flags; ++ struct sock *sk_redir; ++ void *data_end; ++ } bpf; ++ }; ++}; ++ ++struct _bpf_dtab_netdev { ++ struct net_device *dev; ++}; ++ ++struct xdp_umem_page { ++ void *addr; ++ dma_addr_t dma; ++}; ++ ++struct xdp_sock { ++ struct sock sk; ++ struct xsk_queue *rx; ++ struct net_device *dev; ++ struct xdp_umem *umem; ++ struct list_head flush_node; ++ u16 queue_id; ++ long: 48; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct xsk_queue *tx; ++ struct list_head list; ++ bool zc; ++ struct mutex mutex; ++ spinlock_t tx_completion_lock; ++ u64 rx_dropped; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct ipv6_sr_hdr { ++ __u8 nexthdr; ++ __u8 hdrlen; ++ __u8 type; ++ __u8 segments_left; ++ __u8 first_segment; ++ __u8 flags; ++ __u16 tag; ++ struct in6_addr segments[0]; ++}; ++ ++struct seg6_bpf_srh_state { ++ struct ipv6_sr_hdr *srh; ++ u16 hdrlen; ++ bool valid; ++}; ++ ++struct bpf_scratchpad { ++ union { ++ __be32 diff[128]; ++ u8 buff[512]; ++ }; ++}; ++ ++struct sk_reuseport_kern { ++ struct sk_buff *skb; ++ struct sock *sk; ++ struct sock *selected_sk; ++ void *data_end; ++ u32 hash; ++ u32 
reuseport_id; ++ bool bind_inany; ++}; ++ ++struct bpf_dtab_netdev___2; ++ ++struct bpf_cpu_map_entry___2; ++ ++struct sock_diag_req { ++ __u8 sdiag_family; ++ __u8 sdiag_protocol; ++}; ++ ++struct sock_diag_handler { ++ __u8 family; ++ int (*dump)(struct sk_buff *, struct nlmsghdr *); ++ int (*get_info)(struct sk_buff *, struct sock *); ++ int (*destroy)(struct sk_buff *, struct nlmsghdr *); ++}; ++ ++struct broadcast_sk { ++ struct sock *sk; ++ struct work_struct work; ++}; ++ ++typedef int gifconf_func_t(struct net_device *, char *, int, int); ++ ++struct hwtstamp_config { ++ int flags; ++ int tx_type; ++ int rx_filter; ++}; ++ ++enum hwtstamp_tx_types { ++ HWTSTAMP_TX_OFF = 0, ++ HWTSTAMP_TX_ON = 1, ++ HWTSTAMP_TX_ONESTEP_SYNC = 2, ++}; ++ ++enum hwtstamp_rx_filters { ++ HWTSTAMP_FILTER_NONE = 0, ++ HWTSTAMP_FILTER_ALL = 1, ++ HWTSTAMP_FILTER_SOME = 2, ++ HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, ++ HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, ++ HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, ++ HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, ++ HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, ++ HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, ++ HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, ++ HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, ++ HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, ++ HWTSTAMP_FILTER_PTP_V2_EVENT = 12, ++ HWTSTAMP_FILTER_PTP_V2_SYNC = 13, ++ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, ++ HWTSTAMP_FILTER_NTP_ALL = 15, ++}; ++ ++struct tso_t { ++ int next_frag_idx; ++ void *data; ++ size_t size; ++ u16 ip_id; ++ bool ipv6; ++ u32 tcp_seq; ++}; ++ ++struct fib_notifier_info { ++ struct net *net; ++ int family; ++ struct netlink_ext_ack *extack; ++}; ++ ++enum fib_event_type { ++ FIB_EVENT_ENTRY_REPLACE = 0, ++ FIB_EVENT_ENTRY_APPEND = 1, ++ FIB_EVENT_ENTRY_ADD = 2, ++ FIB_EVENT_ENTRY_DEL = 3, ++ FIB_EVENT_RULE_ADD = 4, ++ FIB_EVENT_RULE_DEL = 5, ++ FIB_EVENT_NH_ADD = 6, ++ FIB_EVENT_NH_DEL = 7, ++ FIB_EVENT_VIF_ADD = 8, ++ FIB_EVENT_VIF_DEL = 9, ++}; ++ ++struct zero_copy_allocator { ++ void (*free)(struct zero_copy_allocator *, long unsigned int); ++}; ++ ++struct xdp_attachment_info { ++ struct bpf_prog *prog; ++ u32 flags; ++}; ++ ++struct pp_alloc_cache { ++ u32 count; ++ void *cache[128]; ++}; ++ ++struct page_pool_params { ++ unsigned int flags; ++ unsigned int order; ++ unsigned int pool_size; ++ int nid; ++ struct device *dev; ++ enum dma_data_direction dma_dir; ++}; ++ ++struct page_pool { ++ struct callback_head rcu; ++ struct page_pool_params p; ++ long: 64; ++ long: 64; ++ struct pp_alloc_cache alloc; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ struct ptr_ring ring; ++}; ++ ++struct xdp_mem_allocator { ++ struct xdp_mem_info mem; ++ union { ++ void *allocator; ++ struct page_pool *page_pool; ++ struct zero_copy_allocator *zc_alloc; ++ }; ++ struct rhash_head node; ++ struct callback_head rcu; ++}; ++ ++struct rx_queue_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct netdev_rx_queue *, char *); ++ ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); ++}; ++ ++struct netdev_queue_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct netdev_queue *, char *); ++ ssize_t (*store)(struct netdev_queue *, const char *, size_t); ++}; ++ ++struct inet6_ifaddr { ++ struct in6_addr addr; ++ __u32 prefix_len; ++ __u32 rt_priority; ++ __u32 valid_lft; ++ __u32 prefered_lft; ++ refcount_t refcnt; ++ spinlock_t lock; ++ int state; ++ __u32 flags; ++ __u8 dad_probes; ++ __u8 stable_privacy_retry; ++ __u16 scope; ++ __u64 dad_nonce; ++ long unsigned int cstamp; 
++ long unsigned int tstamp; ++ struct delayed_work dad_work; ++ struct inet6_dev *idev; ++ struct fib6_info *rt; ++ struct hlist_node addr_lst; ++ struct list_head if_list; ++ struct list_head tmp_list; ++ struct inet6_ifaddr *ifpub; ++ int regen_count; ++ bool tokenized; ++ struct callback_head rcu; ++ struct in6_addr peer_addr; ++}; ++ ++struct fib_rule_uid_range { ++ __u32 start; ++ __u32 end; ++}; ++ ++enum { ++ FRA_UNSPEC = 0, ++ FRA_DST = 1, ++ FRA_SRC = 2, ++ FRA_IIFNAME = 3, ++ FRA_GOTO = 4, ++ FRA_UNUSED2 = 5, ++ FRA_PRIORITY = 6, ++ FRA_UNUSED3 = 7, ++ FRA_UNUSED4 = 8, ++ FRA_UNUSED5 = 9, ++ FRA_FWMARK = 10, ++ FRA_FLOW = 11, ++ FRA_TUN_ID = 12, ++ FRA_SUPPRESS_IFGROUP = 13, ++ FRA_SUPPRESS_PREFIXLEN = 14, ++ FRA_TABLE = 15, ++ FRA_FWMASK = 16, ++ FRA_OIFNAME = 17, ++ FRA_PAD = 18, ++ FRA_L3MDEV = 19, ++ FRA_UID_RANGE = 20, ++ FRA_PROTOCOL = 21, ++ FRA_IP_PROTO = 22, ++ FRA_SPORT_RANGE = 23, ++ FRA_DPORT_RANGE = 24, ++ __FRA_MAX = 25, ++}; ++ ++enum { ++ FR_ACT_UNSPEC = 0, ++ FR_ACT_TO_TBL = 1, ++ FR_ACT_GOTO = 2, ++ FR_ACT_NOP = 3, ++ FR_ACT_RES3 = 4, ++ FR_ACT_RES4 = 5, ++ FR_ACT_BLACKHOLE = 6, ++ FR_ACT_UNREACHABLE = 7, ++ FR_ACT_PROHIBIT = 8, ++ __FR_ACT_MAX = 9, ++}; ++ ++struct fib_rule_notifier_info { ++ struct fib_notifier_info info; ++ struct fib_rule *rule; ++}; ++ ++struct trace_event_raw_kfree_skb { ++ struct trace_entry ent; ++ void *skbaddr; ++ void *location; ++ short unsigned int protocol; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_consume_skb { ++ struct trace_entry ent; ++ void *skbaddr; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_skb_copy_datagram_iovec { ++ struct trace_entry ent; ++ const void *skbaddr; ++ int len; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_kfree_skb {}; ++ ++struct trace_event_data_offsets_consume_skb {}; ++ ++struct trace_event_data_offsets_skb_copy_datagram_iovec {}; ++ ++struct trace_event_raw_net_dev_start_xmit { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ u16 queue_mapping; ++ const void *skbaddr; ++ bool vlan_tagged; ++ u16 vlan_proto; ++ u16 vlan_tci; ++ u16 protocol; ++ u8 ip_summed; ++ unsigned int len; ++ unsigned int data_len; ++ int network_offset; ++ bool transport_offset_valid; ++ int transport_offset; ++ u8 tx_flags; ++ u16 gso_size; ++ u16 gso_segs; ++ u16 gso_type; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_net_dev_xmit { ++ struct trace_entry ent; ++ void *skbaddr; ++ unsigned int len; ++ int rc; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_net_dev_template { ++ struct trace_entry ent; ++ void *skbaddr; ++ unsigned int len; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_net_dev_rx_verbose_template { ++ struct trace_entry ent; ++ u32 __data_loc_name; ++ unsigned int napi_id; ++ u16 queue_mapping; ++ const void *skbaddr; ++ bool vlan_tagged; ++ u16 vlan_proto; ++ u16 vlan_tci; ++ u16 protocol; ++ u8 ip_summed; ++ u32 hash; ++ bool l4_hash; ++ unsigned int len; ++ unsigned int data_len; ++ unsigned int truesize; ++ bool mac_header_valid; ++ int mac_header; ++ unsigned char nr_frags; ++ u16 gso_size; ++ u16 gso_type; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_net_dev_start_xmit { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_net_dev_xmit { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_net_dev_template { ++ u32 name; ++}; ++ ++struct trace_event_data_offsets_net_dev_rx_verbose_template { ++ u32 name; ++}; ++ ++struct trace_event_raw_napi_poll { ++ struct trace_entry ent; ++ struct 
napi_struct *napi; ++ u32 __data_loc_dev_name; ++ int work; ++ int budget; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_napi_poll { ++ u32 dev_name; ++}; ++ ++enum tcp_ca_state { ++ TCP_CA_Open = 0, ++ TCP_CA_Disorder = 1, ++ TCP_CA_CWR = 2, ++ TCP_CA_Recovery = 3, ++ TCP_CA_Loss = 4, ++}; ++ ++struct trace_event_raw_sock_rcvqueue_full { ++ struct trace_entry ent; ++ int rmem_alloc; ++ unsigned int truesize; ++ int sk_rcvbuf; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_sock_exceed_buf_limit { ++ struct trace_entry ent; ++ char name[32]; ++ long int *sysctl_mem; ++ long int allocated; ++ int sysctl_rmem; ++ int rmem_alloc; ++ int sysctl_wmem; ++ int wmem_alloc; ++ int wmem_queued; ++ int kind; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_inet_sock_set_state { ++ struct trace_entry ent; ++ const void *skaddr; ++ int oldstate; ++ int newstate; ++ __u16 sport; ++ __u16 dport; ++ __u16 family; ++ __u8 protocol; ++ __u8 saddr[4]; ++ __u8 daddr[4]; ++ __u8 saddr_v6[16]; ++ __u8 daddr_v6[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_sock_rcvqueue_full {}; ++ ++struct trace_event_data_offsets_sock_exceed_buf_limit {}; ++ ++struct trace_event_data_offsets_inet_sock_set_state {}; ++ ++struct trace_event_raw_udp_fail_queue_rcv_skb { ++ struct trace_entry ent; ++ int rc; ++ __u16 lport; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; ++ ++struct trace_event_raw_tcp_event_sk_skb { ++ struct trace_entry ent; ++ const void *skbaddr; ++ const void *skaddr; ++ __u16 sport; ++ __u16 dport; ++ __u8 saddr[4]; ++ __u8 daddr[4]; ++ __u8 saddr_v6[16]; ++ __u8 daddr_v6[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_tcp_event_sk { ++ struct trace_entry ent; ++ const void *skaddr; ++ __u16 sport; ++ __u16 dport; ++ __u8 saddr[4]; ++ __u8 daddr[4]; ++ __u8 saddr_v6[16]; ++ __u8 daddr_v6[16]; ++ __u64 sock_cookie; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_tcp_retransmit_synack { ++ struct trace_entry ent; ++ const void *skaddr; ++ const void *req; ++ __u16 sport; ++ __u16 dport; ++ __u8 saddr[4]; ++ __u8 daddr[4]; ++ __u8 saddr_v6[16]; ++ __u8 daddr_v6[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_tcp_probe { ++ struct trace_entry ent; ++ __u8 saddr[28]; ++ __u8 daddr[28]; ++ __u16 sport; ++ __u16 dport; ++ __u32 mark; ++ __u16 data_len; ++ __u32 snd_nxt; ++ __u32 snd_una; ++ __u32 snd_cwnd; ++ __u32 ssthresh; ++ __u32 snd_wnd; ++ __u32 srtt; ++ __u32 rcv_wnd; ++ __u64 sock_cookie; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_tcp_event_sk_skb {}; ++ ++struct trace_event_data_offsets_tcp_event_sk {}; ++ ++struct trace_event_data_offsets_tcp_retransmit_synack {}; ++ ++struct trace_event_data_offsets_tcp_probe {}; ++ ++struct trace_event_raw_fib_table_lookup { ++ struct trace_entry ent; ++ u32 tb_id; ++ int err; ++ int oif; ++ int iif; ++ u8 proto; ++ __u8 tos; ++ __u8 scope; ++ __u8 flags; ++ __u8 src[4]; ++ __u8 dst[4]; ++ __u8 gw[4]; ++ __u8 saddr[4]; ++ u16 sport; ++ u16 dport; ++ u32 __data_loc_name; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_fib_table_lookup { ++ u32 name; ++}; ++ ++struct trace_event_raw_qdisc_dequeue { ++ struct trace_entry ent; ++ struct Qdisc *qdisc; ++ const struct netdev_queue *txq; ++ int packets; ++ void *skbaddr; ++ int ifindex; ++ u32 handle; ++ u32 parent; ++ long unsigned int txq_state; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_qdisc_dequeue {}; ++ ++struct br_mcast_stats { ++ __u64 igmp_v1queries[2]; ++ __u64 
igmp_v2queries[2]; ++ __u64 igmp_v3queries[2]; ++ __u64 igmp_leaves[2]; ++ __u64 igmp_v1reports[2]; ++ __u64 igmp_v2reports[2]; ++ __u64 igmp_v3reports[2]; ++ __u64 igmp_parse_errors; ++ __u64 mld_v1queries[2]; ++ __u64 mld_v2queries[2]; ++ __u64 mld_leaves[2]; ++ __u64 mld_v1reports[2]; ++ __u64 mld_v2reports[2]; ++ __u64 mld_parse_errors; ++ __u64 mcast_bytes[2]; ++ __u64 mcast_packets[2]; ++}; ++ ++struct br_ip { ++ union { ++ __be32 ip4; ++ struct in6_addr ip6; ++ } u; ++ __be16 proto; ++ __u16 vid; ++}; ++ ++struct bridge_id { ++ unsigned char prio[2]; ++ unsigned char addr[6]; ++}; ++ ++typedef struct bridge_id bridge_id; ++ ++struct mac_addr { ++ unsigned char addr[6]; ++}; ++ ++typedef struct mac_addr mac_addr; ++ ++typedef __u16 port_id; ++ ++struct bridge_mcast_own_query { ++ struct timer_list timer; ++ u32 startup_sent; ++}; ++ ++struct bridge_mcast_other_query { ++ struct timer_list timer; ++ long unsigned int delay_time; ++}; ++ ++struct net_bridge_port; ++ ++struct bridge_mcast_querier { ++ struct br_ip addr; ++ struct net_bridge_port *port; ++}; ++ ++struct net_bridge; ++ ++struct net_bridge_vlan_group; ++ ++struct bridge_mcast_stats; ++ ++struct net_bridge_port { ++ struct net_bridge *br; ++ struct net_device *dev; ++ struct list_head list; ++ long unsigned int flags; ++ struct net_bridge_vlan_group *vlgrp; ++ struct net_bridge_port *backup_port; ++ u8 priority; ++ u8 state; ++ u16 port_no; ++ unsigned char topology_change_ack; ++ unsigned char config_pending; ++ port_id port_id; ++ port_id designated_port; ++ bridge_id designated_root; ++ bridge_id designated_bridge; ++ u32 path_cost; ++ u32 designated_cost; ++ long unsigned int designated_age; ++ struct timer_list forward_delay_timer; ++ struct timer_list hold_timer; ++ struct timer_list message_age_timer; ++ struct kobject kobj; ++ struct callback_head rcu; ++ struct bridge_mcast_own_query ip4_own_query; ++ struct bridge_mcast_own_query ip6_own_query; ++ unsigned char multicast_router; ++ struct bridge_mcast_stats *mcast_stats; ++ struct timer_list multicast_router_timer; ++ struct hlist_head mglist; ++ struct hlist_node rlist; ++ char sysfs_name[16]; ++ struct netpoll *np; ++ int offload_fwd_mark; ++ u16 group_fwd_mask; ++ u16 backup_redirected_cnt; ++}; ++ ++struct bridge_mcast_stats { ++ struct br_mcast_stats mstats; ++ struct u64_stats_sync syncp; ++}; ++ ++struct net_bridge_mdb_htable; ++ ++struct net_bridge { ++ spinlock_t lock; ++ spinlock_t hash_lock; ++ struct list_head port_list; ++ struct net_device *dev; ++ struct pcpu_sw_netstats *stats; ++ u8 vlan_enabled; ++ u8 vlan_stats_enabled; ++ __be16 vlan_proto; ++ u16 default_pvid; ++ struct net_bridge_vlan_group *vlgrp; ++ struct rhashtable fdb_hash_tbl; ++ union { ++ struct rtable fake_rtable; ++ struct rt6_info fake_rt6_info; ++ }; ++ bool nf_call_iptables; ++ bool nf_call_ip6tables; ++ bool nf_call_arptables; ++ u16 group_fwd_mask; ++ u16 group_fwd_mask_required; ++ bridge_id designated_root; ++ bridge_id bridge_id; ++ u32 root_path_cost; ++ unsigned char topology_change; ++ unsigned char topology_change_detected; ++ u16 root_port; ++ long unsigned int max_age; ++ long unsigned int hello_time; ++ long unsigned int forward_delay; ++ long unsigned int ageing_time; ++ long unsigned int bridge_max_age; ++ long unsigned int bridge_hello_time; ++ long unsigned int bridge_forward_delay; ++ long unsigned int bridge_ageing_time; ++ u8 group_addr[6]; ++ bool group_addr_set; ++ enum { ++ BR_NO_STP = 0, ++ BR_KERNEL_STP = 1, ++ BR_USER_STP = 2, ++ } stp_enabled; ++ 
unsigned char multicast_router; ++ u8 multicast_disabled: 1; ++ u8 multicast_querier: 1; ++ u8 multicast_query_use_ifaddr: 1; ++ u8 has_ipv6_addr: 1; ++ u8 multicast_stats_enabled: 1; ++ u32 hash_elasticity; ++ u32 hash_max; ++ u32 multicast_last_member_count; ++ u32 multicast_startup_query_count; ++ u8 multicast_igmp_version; ++ long unsigned int multicast_last_member_interval; ++ long unsigned int multicast_membership_interval; ++ long unsigned int multicast_querier_interval; ++ long unsigned int multicast_query_interval; ++ long unsigned int multicast_query_response_interval; ++ long unsigned int multicast_startup_query_interval; ++ spinlock_t multicast_lock; ++ struct net_bridge_mdb_htable *mdb; ++ struct hlist_head router_list; ++ struct timer_list multicast_router_timer; ++ struct bridge_mcast_other_query ip4_other_query; ++ struct bridge_mcast_own_query ip4_own_query; ++ struct bridge_mcast_querier ip4_querier; ++ struct bridge_mcast_stats *mcast_stats; ++ struct bridge_mcast_other_query ip6_other_query; ++ struct bridge_mcast_own_query ip6_own_query; ++ struct bridge_mcast_querier ip6_querier; ++ u8 multicast_mld_version; ++ struct timer_list hello_timer; ++ struct timer_list tcn_timer; ++ struct timer_list topology_change_timer; ++ struct delayed_work gc_work; ++ struct kobject *ifobj; ++ u32 auto_cnt; ++ int offload_fwd_mark; ++ bool neigh_suppress_enabled; ++ bool mtu_set_by_user; ++ struct hlist_head fdb_list; ++}; ++ ++struct net_bridge_vlan_group { ++ struct rhashtable vlan_hash; ++ struct rhashtable tunnel_hash; ++ struct list_head vlan_list; ++ u16 num_vlans; ++ u16 pvid; ++}; ++ ++struct net_bridge_fdb_key { ++ mac_addr addr; ++ u16 vlan_id; ++}; ++ ++struct net_bridge_fdb_entry { ++ struct rhash_head rhnode; ++ struct net_bridge_port *dst; ++ struct net_bridge_fdb_key key; ++ struct hlist_node fdb_node; ++ unsigned char is_local: 1; ++ unsigned char is_static: 1; ++ unsigned char added_by_user: 1; ++ unsigned char added_by_external_learn: 1; ++ unsigned char offloaded: 1; ++ long: 59; ++ long: 64; ++ long: 64; ++ long unsigned int updated; ++ long unsigned int used; ++ struct callback_head rcu; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct net_bridge_mdb_htable { ++ struct hlist_head *mhash; ++ struct callback_head rcu; ++ struct net_bridge_mdb_htable *old; ++ u32 size; ++ u32 max; ++ u32 secret; ++ u32 ver; ++}; ++ ++struct nf_br_ops { ++ int (*br_dev_xmit_hook)(struct sk_buff *); ++}; ++ ++struct trace_event_raw_br_fdb_add { ++ struct trace_entry ent; ++ u8 ndm_flags; ++ u32 __data_loc_dev; ++ unsigned char addr[6]; ++ u16 vid; ++ u16 nlh_flags; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_br_fdb_external_learn_add { ++ struct trace_entry ent; ++ u32 __data_loc_br_dev; ++ u32 __data_loc_dev; ++ unsigned char addr[6]; ++ u16 vid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_fdb_delete { ++ struct trace_entry ent; ++ u32 __data_loc_br_dev; ++ u32 __data_loc_dev; ++ unsigned char addr[6]; ++ u16 vid; ++ char __data[0]; ++}; ++ ++struct trace_event_raw_br_fdb_update { ++ struct trace_entry ent; ++ u32 __data_loc_br_dev; ++ u32 __data_loc_dev; ++ unsigned char addr[6]; ++ u16 vid; ++ bool added_by_user; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_br_fdb_add { ++ u32 dev; ++}; ++ ++struct trace_event_data_offsets_br_fdb_external_learn_add { ++ u32 br_dev; ++ u32 dev; ++}; ++ ++struct trace_event_data_offsets_fdb_delete { ++ u32 br_dev; ++ u32 dev; ++}; ++ ++struct trace_event_data_offsets_br_fdb_update { ++ u32 br_dev; ++ 
u32 dev; ++}; ++ ++struct update_classid_context { ++ u32 classid; ++ unsigned int batch; ++}; ++ ++enum lwtunnel_encap_types { ++ LWTUNNEL_ENCAP_NONE = 0, ++ LWTUNNEL_ENCAP_MPLS = 1, ++ LWTUNNEL_ENCAP_IP = 2, ++ LWTUNNEL_ENCAP_ILA = 3, ++ LWTUNNEL_ENCAP_IP6 = 4, ++ LWTUNNEL_ENCAP_SEG6 = 5, ++ LWTUNNEL_ENCAP_BPF = 6, ++ LWTUNNEL_ENCAP_SEG6_LOCAL = 7, ++ __LWTUNNEL_ENCAP_MAX = 8, ++}; ++ ++struct rtnexthop { ++ short unsigned int rtnh_len; ++ unsigned char rtnh_flags; ++ unsigned char rtnh_hops; ++ int rtnh_ifindex; ++}; ++ ++struct lwtunnel_encap_ops { ++ int (*build_state)(struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); ++ void (*destroy_state)(struct lwtunnel_state *); ++ int (*output)(struct net *, struct sock *, struct sk_buff *); ++ int (*input)(struct sk_buff *); ++ int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); ++ int (*get_encap_size)(struct lwtunnel_state *); ++ int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); ++ int (*xmit)(struct sk_buff *); ++ struct module *owner; ++}; ++ ++enum bpf_ret_code { ++ BPF_OK = 0, ++ BPF_DROP = 2, ++ BPF_REDIRECT = 7, ++}; ++ ++enum { ++ LWT_BPF_PROG_UNSPEC = 0, ++ LWT_BPF_PROG_FD = 1, ++ LWT_BPF_PROG_NAME = 2, ++ __LWT_BPF_PROG_MAX = 3, ++}; ++ ++enum { ++ LWT_BPF_UNSPEC = 0, ++ LWT_BPF_IN = 1, ++ LWT_BPF_OUT = 2, ++ LWT_BPF_XMIT = 3, ++ LWT_BPF_XMIT_HEADROOM = 4, ++ __LWT_BPF_MAX = 5, ++}; ++ ++enum { ++ LWTUNNEL_XMIT_DONE = 0, ++ LWTUNNEL_XMIT_CONTINUE = 1, ++}; ++ ++struct bpf_lwt_prog { ++ struct bpf_prog *prog; ++ char *name; ++}; ++ ++struct bpf_lwt { ++ struct bpf_lwt_prog in; ++ struct bpf_lwt_prog out; ++ struct bpf_lwt_prog xmit; ++ int family; ++}; ++ ++struct dst_cache_pcpu { ++ long unsigned int refresh_ts; ++ struct dst_entry *dst; ++ u32 cookie; ++ union { ++ struct in_addr in_saddr; ++ struct in6_addr in6_saddr; ++ }; ++}; ++ ++struct gro_cell; ++ ++struct gro_cells { ++ struct gro_cell *cells; ++}; ++ ++struct gro_cell { ++ struct sk_buff_head napi_skbs; ++ struct napi_struct napi; ++}; ++ ++struct group_req { ++ __u32 gr_interface; ++ int: 32; ++ struct __kernel_sockaddr_storage gr_group; ++}; ++ ++struct group_source_req { ++ __u32 gsr_interface; ++ int: 32; ++ struct __kernel_sockaddr_storage gsr_group; ++ struct __kernel_sockaddr_storage gsr_source; ++}; ++ ++struct group_filter { ++ __u32 gf_interface; ++ int: 32; ++ struct __kernel_sockaddr_storage gf_group; ++ __u32 gf_fmode; ++ __u32 gf_numsrc; ++ struct __kernel_sockaddr_storage gf_slist[1]; ++}; ++ ++struct compat_cmsghdr { ++ compat_size_t cmsg_len; ++ compat_int_t cmsg_level; ++ compat_int_t cmsg_type; ++}; ++ ++struct compat_group_req { ++ __u32 gr_interface; ++ struct __kernel_sockaddr_storage gr_group; ++}; ++ ++struct compat_group_source_req { ++ __u32 gsr_interface; ++ struct __kernel_sockaddr_storage gsr_group; ++ struct __kernel_sockaddr_storage gsr_source; ++}; ++ ++struct compat_group_filter { ++ __u32 gf_interface; ++ struct __kernel_sockaddr_storage gf_group; ++ __u32 gf_fmode; ++ __u32 gf_numsrc; ++ struct __kernel_sockaddr_storage gf_slist[1]; ++}; ++ ++typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); ++ ++struct fch_hdr { ++ __u8 daddr[6]; ++ __u8 saddr[6]; ++}; ++ ++struct fcllc { ++ __u8 dsap; ++ __u8 ssap; ++ __u8 llc; ++ __u8 protid[3]; ++ __be16 ethertype; ++}; ++ ++enum macvlan_mode { ++ MACVLAN_MODE_PRIVATE = 1, ++ MACVLAN_MODE_VEPA = 2, ++ MACVLAN_MODE_BRIDGE = 4, ++ MACVLAN_MODE_PASSTHRU = 8, ++ MACVLAN_MODE_SOURCE = 16, ++}; ++ 
++struct tc_ratespec { ++ unsigned char cell_log; ++ __u8 linklayer; ++ short unsigned int overhead; ++ short int cell_align; ++ short unsigned int mpu; ++ __u32 rate; ++}; ++ ++struct tc_prio_qopt { ++ int bands; ++ __u8 priomap[16]; ++}; ++ ++enum { ++ TCA_UNSPEC = 0, ++ TCA_KIND = 1, ++ TCA_OPTIONS = 2, ++ TCA_STATS = 3, ++ TCA_XSTATS = 4, ++ TCA_RATE = 5, ++ TCA_FCNT = 6, ++ TCA_STATS2 = 7, ++ TCA_STAB = 8, ++ TCA_PAD = 9, ++ TCA_DUMP_INVISIBLE = 10, ++ TCA_CHAIN = 11, ++ TCA_HW_OFFLOAD = 12, ++ TCA_INGRESS_BLOCK = 13, ++ TCA_EGRESS_BLOCK = 14, ++ __TCA_MAX = 15, ++}; ++ ++struct vlan_pcpu_stats { ++ u64 rx_packets; ++ u64 rx_bytes; ++ u64 rx_multicast; ++ u64 tx_packets; ++ u64 tx_bytes; ++ struct u64_stats_sync syncp; ++ u32 rx_errors; ++ u32 tx_dropped; ++}; ++ ++struct netpoll___2; ++ ++struct skb_array { ++ struct ptr_ring ring; ++}; ++ ++struct macvlan_port; ++ ++struct macvlan_dev { ++ struct net_device *dev; ++ struct list_head list; ++ struct hlist_node hlist; ++ struct macvlan_port *port; ++ struct net_device *lowerdev; ++ void *accel_priv; ++ struct vlan_pcpu_stats *pcpu_stats; ++ long unsigned int mc_filter[4]; ++ netdev_features_t set_features; ++ enum macvlan_mode mode; ++ u16 flags; ++ int nest_level; ++ unsigned int macaddr_count; ++ struct netpoll___2 *netpoll; ++}; ++ ++struct psched_ratecfg { ++ u64 rate_bytes_ps; ++ u32 mult; ++ u16 overhead; ++ u8 linklayer; ++ u8 shift; ++}; ++ ++struct mini_Qdisc_pair { ++ struct mini_Qdisc miniq1; ++ struct mini_Qdisc miniq2; ++ struct mini_Qdisc **p_miniq; ++}; ++ ++struct pfifo_fast_priv { ++ struct skb_array q[3]; ++}; ++ ++struct tc_qopt_offload_stats { ++ struct gnet_stats_basic_packed *bstats; ++ struct gnet_stats_queue *qstats; ++}; ++ ++enum tc_mq_command { ++ TC_MQ_CREATE = 0, ++ TC_MQ_DESTROY = 1, ++ TC_MQ_STATS = 2, ++}; ++ ++struct tc_mq_qopt_offload { ++ enum tc_mq_command command; ++ u32 handle; ++ struct tc_qopt_offload_stats stats; ++}; ++ ++struct mq_sched { ++ struct Qdisc **qdiscs; ++}; ++ ++enum tc_link_layer { ++ TC_LINKLAYER_UNAWARE = 0, ++ TC_LINKLAYER_ETHERNET = 1, ++ TC_LINKLAYER_ATM = 2, ++}; ++ ++enum { ++ TCA_STAB_UNSPEC = 0, ++ TCA_STAB_BASE = 1, ++ TCA_STAB_DATA = 2, ++ __TCA_STAB_MAX = 3, ++}; ++ ++struct qdisc_rate_table { ++ struct tc_ratespec rate; ++ u32 data[256]; ++ struct qdisc_rate_table *next; ++ int refcnt; ++}; ++ ++struct Qdisc_class_common { ++ u32 classid; ++ struct hlist_node hnode; ++}; ++ ++struct Qdisc_class_hash { ++ struct hlist_head *hash; ++ unsigned int hashsize; ++ unsigned int hashmask; ++ unsigned int hashelems; ++}; ++ ++struct qdisc_watchdog { ++ u64 last_expires; ++ struct hrtimer timer; ++ struct Qdisc *qdisc; ++}; ++ ++struct check_loop_arg { ++ struct qdisc_walker w; ++ struct Qdisc *p; ++ int depth; ++}; ++ ++struct tcf_bind_args { ++ struct tcf_walker w; ++ u32 classid; ++ long unsigned int cl; ++}; ++ ++struct qdisc_dump_args { ++ struct qdisc_walker w; ++ struct sk_buff *skb; ++ struct netlink_callback *cb; ++}; ++ ++enum net_xmit_qdisc_t { ++ __NET_XMIT_STOLEN = 65536, ++ __NET_XMIT_BYPASS = 131072, ++}; ++ ++enum { ++ TCA_ACT_UNSPEC = 0, ++ TCA_ACT_KIND = 1, ++ TCA_ACT_OPTIONS = 2, ++ TCA_ACT_INDEX = 3, ++ TCA_ACT_STATS = 4, ++ TCA_ACT_PAD = 5, ++ TCA_ACT_COOKIE = 6, ++ __TCA_ACT_MAX = 7, ++}; ++ ++struct tcf_t { ++ __u64 install; ++ __u64 lastuse; ++ __u64 expires; ++ __u64 firstuse; ++}; ++ ++typedef void tcf_chain_head_change_t(struct tcf_proto *, void *); ++ ++struct tcf_idrinfo { ++ spinlock_t lock; ++ struct idr action_idr; ++ struct net *net; ++}; ++ 
++struct tc_action_ops; ++ ++struct tc_cookie; ++ ++struct tc_action { ++ const struct tc_action_ops *ops; ++ __u32 type; ++ __u32 order; ++ struct tcf_idrinfo *idrinfo; ++ u32 tcfa_index; ++ refcount_t tcfa_refcnt; ++ atomic_t tcfa_bindcnt; ++ int tcfa_action; ++ struct tcf_t tcfa_tm; ++ struct gnet_stats_basic_packed tcfa_bstats; ++ struct gnet_stats_queue tcfa_qstats; ++ struct net_rate_estimator *tcfa_rate_est; ++ spinlock_t tcfa_lock; ++ struct gnet_stats_basic_cpu *cpu_bstats; ++ struct gnet_stats_queue *cpu_qstats; ++ struct tc_cookie *act_cookie; ++ struct tcf_chain *goto_chain; ++}; ++ ++struct tc_action_ops { ++ struct list_head head; ++ char kind[16]; ++ __u32 type; ++ size_t size; ++ struct module *owner; ++ int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); ++ int (*dump)(struct sk_buff *, struct tc_action *, int, int); ++ void (*cleanup)(struct tc_action *); ++ int (*lookup)(struct net *, struct tc_action **, u32, struct netlink_ext_ack *); ++ int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, int, int, bool, struct netlink_ext_ack *); ++ int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); ++ void (*stats_update)(struct tc_action *, u64, u32, u64); ++ size_t (*get_fill_size)(const struct tc_action *); ++ struct net_device * (*get_dev)(const struct tc_action *); ++ void (*put_dev)(struct net_device *); ++}; ++ ++struct tc_cookie { ++ u8 *data; ++ u32 len; ++ struct callback_head rcu; ++}; ++ ++enum tcf_block_binder_type { ++ TCF_BLOCK_BINDER_TYPE_UNSPEC = 0, ++ TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, ++ TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, ++}; ++ ++struct tcf_block_ext_info { ++ enum tcf_block_binder_type binder_type; ++ tcf_chain_head_change_t *chain_head_change; ++ void *chain_head_change_priv; ++ u32 block_index; ++}; ++ ++struct tcf_exts { ++ __u32 type; ++ int nr_actions; ++ struct tc_action **actions; ++ struct net *net; ++ int action; ++ int police; ++}; ++ ++enum tc_block_command { ++ TC_BLOCK_BIND = 0, ++ TC_BLOCK_UNBIND = 1, ++}; ++ ++struct tc_block_offload { ++ enum tc_block_command command; ++ enum tcf_block_binder_type binder_type; ++ struct tcf_block *block; ++ struct netlink_ext_ack *extack; ++}; ++ ++struct tcf_filter_chain_list_item { ++ struct list_head list; ++ tcf_chain_head_change_t *chain_head_change; ++ void *chain_head_change_priv; ++}; ++ ++struct tcf_net { ++ struct idr idr; ++}; ++ ++struct tcf_block_owner_item { ++ struct list_head list; ++ struct Qdisc *q; ++ enum tcf_block_binder_type binder_type; ++}; ++ ++struct tcf_block_cb { ++ struct list_head list; ++ tc_setup_cb_t *cb; ++ void *cb_ident; ++ void *cb_priv; ++ unsigned int refcnt; ++}; ++ ++struct tcf_chain_info { ++ struct tcf_proto **pprev; ++ struct tcf_proto *next; ++}; ++ ++struct tcf_dump_args { ++ struct tcf_walker w; ++ struct sk_buff *skb; ++ struct netlink_callback *cb; ++ struct tcf_block *block; ++ struct Qdisc *q; ++ u32 parent; ++}; ++ ++struct tcamsg { ++ unsigned char tca_family; ++ unsigned char tca__pad1; ++ short unsigned int tca__pad2; ++}; ++ ++enum { ++ TCA_ROOT_UNSPEC = 0, ++ TCA_ROOT_TAB = 1, ++ TCA_ROOT_FLAGS = 2, ++ TCA_ROOT_COUNT = 3, ++ TCA_ROOT_TIME_DELTA = 4, ++ __TCA_ROOT_MAX = 5, ++}; ++ ++struct tc_action_net { ++ struct tcf_idrinfo *idrinfo; ++ const struct tc_action_ops *ops; ++}; ++ ++struct tcf_action_net { ++ struct rhashtable egdev_ht; ++}; ++ ++struct tcf_action_egdev_cb { ++ struct list_head list; ++ 
tc_setup_cb_t *cb; ++ void *cb_priv; ++}; ++ ++struct tcf_action_egdev { ++ struct rhash_head ht_node; ++ const struct net_device *dev; ++ unsigned int refcnt; ++ struct list_head cb_list; ++}; ++ ++struct tc_fifo_qopt { ++ __u32 limit; ++}; ++ ++enum { ++ TCA_CGROUP_UNSPEC = 0, ++ TCA_CGROUP_ACT = 1, ++ TCA_CGROUP_POLICE = 2, ++ TCA_CGROUP_EMATCHES = 3, ++ __TCA_CGROUP_MAX = 4, ++}; ++ ++struct tcf_ematch_tree_hdr { ++ __u16 nmatches; ++ __u16 progid; ++}; ++ ++struct tcf_pkt_info { ++ unsigned char *ptr; ++ int nexthdr; ++}; ++ ++struct tcf_ematch_ops; ++ ++struct tcf_ematch { ++ struct tcf_ematch_ops *ops; ++ long unsigned int data; ++ unsigned int datalen; ++ u16 matchid; ++ u16 flags; ++ struct net *net; ++}; ++ ++struct tcf_ematch_ops { ++ int kind; ++ int datalen; ++ int (*change)(struct net *, void *, int, struct tcf_ematch *); ++ int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); ++ void (*destroy)(struct tcf_ematch *); ++ int (*dump)(struct sk_buff *, struct tcf_ematch *); ++ struct module *owner; ++ struct list_head link; ++}; ++ ++struct tcf_ematch_tree { ++ struct tcf_ematch_tree_hdr hdr; ++ struct tcf_ematch *matches; ++}; ++ ++struct cls_cgroup_head { ++ u32 handle; ++ struct tcf_exts exts; ++ struct tcf_ematch_tree ematches; ++ struct tcf_proto *tp; ++ struct rcu_work rwork; ++}; ++ ++enum { ++ TCA_EMATCH_TREE_UNSPEC = 0, ++ TCA_EMATCH_TREE_HDR = 1, ++ TCA_EMATCH_TREE_LIST = 2, ++ __TCA_EMATCH_TREE_MAX = 3, ++}; ++ ++struct tcf_ematch_hdr { ++ __u16 matchid; ++ __u16 kind; ++ __u16 flags; ++ __u16 pad; ++}; ++ ++struct sockaddr_nl { ++ __kernel_sa_family_t nl_family; ++ short unsigned int nl_pad; ++ __u32 nl_pid; ++ __u32 nl_groups; ++}; ++ ++struct nlmsgerr { ++ int error; ++ struct nlmsghdr msg; ++}; ++ ++enum nlmsgerr_attrs { ++ NLMSGERR_ATTR_UNUSED = 0, ++ NLMSGERR_ATTR_MSG = 1, ++ NLMSGERR_ATTR_OFFS = 2, ++ NLMSGERR_ATTR_COOKIE = 3, ++ __NLMSGERR_ATTR_MAX = 4, ++ NLMSGERR_ATTR_MAX = 3, ++}; ++ ++struct nl_pktinfo { ++ __u32 group; ++}; ++ ++enum { ++ NETLINK_UNCONNECTED = 0, ++ NETLINK_CONNECTED = 1, ++}; ++ ++enum netlink_skb_flags { ++ NETLINK_SKB_DST = 8, ++}; ++ ++struct netlink_notify { ++ struct net *net; ++ u32 portid; ++ int protocol; ++}; ++ ++struct netlink_tap { ++ struct net_device *dev; ++ struct module *module; ++ struct list_head list; ++}; ++ ++struct netlink_sock { ++ struct sock sk; ++ u32 portid; ++ u32 dst_portid; ++ u32 dst_group; ++ u32 flags; ++ u32 subscriptions; ++ u32 ngroups; ++ long unsigned int *groups; ++ long unsigned int state; ++ size_t max_recvmsg_len; ++ wait_queue_head_t wait; ++ bool bound; ++ bool cb_running; ++ int dump_done_errno; ++ struct netlink_callback cb; ++ struct mutex *cb_mutex; ++ struct mutex cb_def_mutex; ++ void (*netlink_rcv)(struct sk_buff *); ++ int (*netlink_bind)(struct net *, int); ++ void (*netlink_unbind)(struct net *, int); ++ struct module *module; ++ struct rhash_head node; ++ struct callback_head rcu; ++ struct work_struct work; ++}; ++ ++struct listeners; ++ ++struct netlink_table { ++ struct rhashtable hash; ++ struct hlist_head mc_list; ++ struct listeners *listeners; ++ unsigned int flags; ++ unsigned int groups; ++ struct mutex *cb_mutex; ++ struct module *module; ++ int (*bind)(struct net *, int); ++ void (*unbind)(struct net *, int); ++ bool (*compare)(struct net *, struct sock *); ++ int registered; ++}; ++ ++struct listeners { ++ struct callback_head rcu; ++ long unsigned int masks[0]; ++}; ++ ++struct netlink_tap_net { ++ struct list_head netlink_tap_all; ++ struct mutex 
netlink_tap_lock; ++}; ++ ++struct netlink_compare_arg { ++ possible_net_t pnet; ++ u32 portid; ++}; ++ ++struct netlink_broadcast_data { ++ struct sock *exclude_sk; ++ struct net *net; ++ u32 portid; ++ u32 group; ++ int failure; ++ int delivery_failure; ++ int congested; ++ int delivered; ++ gfp_t allocation; ++ struct sk_buff *skb; ++ struct sk_buff *skb2; ++ int (*tx_filter)(struct sock *, struct sk_buff *, void *); ++ void *tx_data; ++}; ++ ++struct netlink_set_err_data { ++ struct sock *exclude_sk; ++ u32 portid; ++ u32 group; ++ int code; ++}; ++ ++struct nl_seq_iter { ++ struct seq_net_private p; ++ struct rhashtable_iter hti; ++ int link; ++}; ++ ++enum { ++ CTRL_CMD_UNSPEC = 0, ++ CTRL_CMD_NEWFAMILY = 1, ++ CTRL_CMD_DELFAMILY = 2, ++ CTRL_CMD_GETFAMILY = 3, ++ CTRL_CMD_NEWOPS = 4, ++ CTRL_CMD_DELOPS = 5, ++ CTRL_CMD_GETOPS = 6, ++ CTRL_CMD_NEWMCAST_GRP = 7, ++ CTRL_CMD_DELMCAST_GRP = 8, ++ CTRL_CMD_GETMCAST_GRP = 9, ++ __CTRL_CMD_MAX = 10, ++}; ++ ++enum { ++ CTRL_ATTR_UNSPEC = 0, ++ CTRL_ATTR_FAMILY_ID = 1, ++ CTRL_ATTR_FAMILY_NAME = 2, ++ CTRL_ATTR_VERSION = 3, ++ CTRL_ATTR_HDRSIZE = 4, ++ CTRL_ATTR_MAXATTR = 5, ++ CTRL_ATTR_OPS = 6, ++ CTRL_ATTR_MCAST_GROUPS = 7, ++ __CTRL_ATTR_MAX = 8, ++}; ++ ++enum { ++ CTRL_ATTR_OP_UNSPEC = 0, ++ CTRL_ATTR_OP_ID = 1, ++ CTRL_ATTR_OP_FLAGS = 2, ++ __CTRL_ATTR_OP_MAX = 3, ++}; ++ ++enum { ++ CTRL_ATTR_MCAST_GRP_UNSPEC = 0, ++ CTRL_ATTR_MCAST_GRP_NAME = 1, ++ CTRL_ATTR_MCAST_GRP_ID = 2, ++ __CTRL_ATTR_MCAST_GRP_MAX = 3, ++}; ++ ++struct nf_hook_entries_rcu_head { ++ struct callback_head head; ++ void *allocation; ++}; ++ ++struct nf_loginfo { ++ u_int8_t type; ++ union { ++ struct { ++ u_int32_t copy_len; ++ u_int16_t group; ++ u_int16_t qthreshold; ++ u_int16_t flags; ++ } ulog; ++ struct { ++ u_int8_t level; ++ u_int8_t logflags; ++ } log; ++ } u; ++}; ++ ++struct nf_log_buf { ++ unsigned int count; ++ char buf[1020]; ++}; ++ ++struct ip_rt_info { ++ __be32 daddr; ++ __be32 saddr; ++ u_int8_t tos; ++ u_int32_t mark; ++}; ++ ++struct ip6_rt_info { ++ struct in6_addr daddr; ++ struct in6_addr saddr; ++ u_int32_t mark; ++}; ++ ++struct xt_action_param; ++ ++struct xt_mtchk_param; ++ ++struct xt_mtdtor_param; ++ ++struct xt_match { ++ struct list_head list; ++ const char name[29]; ++ u_int8_t revision; ++ bool (*match)(const struct sk_buff *, struct xt_action_param *); ++ int (*checkentry)(const struct xt_mtchk_param *); ++ void (*destroy)(const struct xt_mtdtor_param *); ++ void (*compat_from_user)(void *, const void *); ++ int (*compat_to_user)(void *, const void *); ++ struct module *me; ++ const char *table; ++ unsigned int matchsize; ++ unsigned int usersize; ++ unsigned int compatsize; ++ unsigned int hooks; ++ short unsigned int proto; ++ short unsigned int family; ++}; ++ ++struct xt_entry_match { ++ union { ++ struct { ++ __u16 match_size; ++ char name[29]; ++ __u8 revision; ++ } user; ++ struct { ++ __u16 match_size; ++ struct xt_match *match; ++ } kernel; ++ __u16 match_size; ++ } u; ++ unsigned char data[0]; ++}; ++ ++struct xt_tgchk_param; ++ ++struct xt_tgdtor_param; ++ ++struct xt_target { ++ struct list_head list; ++ const char name[29]; ++ u_int8_t revision; ++ unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); ++ int (*checkentry)(const struct xt_tgchk_param *); ++ void (*destroy)(const struct xt_tgdtor_param *); ++ void (*compat_from_user)(void *, const void *); ++ int (*compat_to_user)(void *, const void *); ++ struct module *me; ++ const char *table; ++ unsigned int targetsize; ++ unsigned int 
usersize; ++ unsigned int compatsize; ++ unsigned int hooks; ++ short unsigned int proto; ++ short unsigned int family; ++}; ++ ++struct xt_entry_target { ++ union { ++ struct { ++ __u16 target_size; ++ char name[29]; ++ __u8 revision; ++ } user; ++ struct { ++ __u16 target_size; ++ struct xt_target *target; ++ } kernel; ++ __u16 target_size; ++ } u; ++ unsigned char data[0]; ++}; ++ ++struct xt_standard_target { ++ struct xt_entry_target target; ++ int verdict; ++}; ++ ++struct xt_error_target { ++ struct xt_entry_target target; ++ char errorname[30]; ++}; ++ ++struct xt_counters { ++ __u64 pcnt; ++ __u64 bcnt; ++}; ++ ++struct xt_counters_info { ++ char name[32]; ++ unsigned int num_counters; ++ struct xt_counters counters[0]; ++}; ++ ++struct xt_action_param { ++ union { ++ const struct xt_match *match; ++ const struct xt_target *target; ++ }; ++ union { ++ const void *matchinfo; ++ const void *targinfo; ++ }; ++ const struct nf_hook_state *state; ++ int fragoff; ++ unsigned int thoff; ++ bool hotdrop; ++}; ++ ++struct xt_mtchk_param { ++ struct net *net; ++ const char *table; ++ const void *entryinfo; ++ const struct xt_match *match; ++ void *matchinfo; ++ unsigned int hook_mask; ++ u_int8_t family; ++ bool nft_compat; ++}; ++ ++struct xt_mtdtor_param { ++ struct net *net; ++ const struct xt_match *match; ++ void *matchinfo; ++ u_int8_t family; ++}; ++ ++struct xt_tgchk_param { ++ struct net *net; ++ const char *table; ++ const void *entryinfo; ++ const struct xt_target *target; ++ void *targinfo; ++ unsigned int hook_mask; ++ u_int8_t family; ++ bool nft_compat; ++}; ++ ++struct xt_tgdtor_param { ++ struct net *net; ++ const struct xt_target *target; ++ void *targinfo; ++ u_int8_t family; ++}; ++ ++struct xt_percpu_counter_alloc_state { ++ unsigned int off; ++ const char *mem; ++}; ++ ++struct compat_xt_entry_match { ++ union { ++ struct { ++ u_int16_t match_size; ++ char name[29]; ++ u_int8_t revision; ++ } user; ++ struct { ++ u_int16_t match_size; ++ compat_uptr_t match; ++ } kernel; ++ u_int16_t match_size; ++ } u; ++ unsigned char data[0]; ++}; ++ ++struct compat_xt_entry_target { ++ union { ++ struct { ++ u_int16_t target_size; ++ char name[29]; ++ u_int8_t revision; ++ } user; ++ struct { ++ u_int16_t target_size; ++ compat_uptr_t target; ++ } kernel; ++ u_int16_t target_size; ++ } u; ++ unsigned char data[0]; ++}; ++ ++struct compat_xt_counters { ++ compat_u64 pcnt; ++ compat_u64 bcnt; ++}; ++ ++struct compat_xt_counters_info { ++ char name[32]; ++ compat_uint_t num_counters; ++ struct compat_xt_counters counters[0]; ++}; ++ ++struct compat_delta { ++ unsigned int offset; ++ int delta; ++}; ++ ++struct xt_af { ++ struct mutex mutex; ++ struct list_head match; ++ struct list_head target; ++ struct mutex compat_mutex; ++ struct compat_delta *compat_tab; ++ unsigned int number; ++ unsigned int cur; ++}; ++ ++struct compat_xt_standard_target { ++ struct compat_xt_entry_target t; ++ compat_uint_t verdict; ++}; ++ ++struct compat_xt_error_target { ++ struct compat_xt_entry_target t; ++ char errorname[30]; ++}; ++ ++struct nf_mttg_trav { ++ struct list_head *head; ++ struct list_head *curr; ++ uint8_t class; ++}; ++ ++enum { ++ MTTG_TRAV_INIT = 0, ++ MTTG_TRAV_NFP_UNSPEC = 1, ++ MTTG_TRAV_NFP_SPEC = 2, ++ MTTG_TRAV_DONE = 3, ++}; ++ ++struct xt_tcp { ++ __u16 spts[2]; ++ __u16 dpts[2]; ++ __u8 option; ++ __u8 flg_mask; ++ __u8 flg_cmp; ++ __u8 invflags; ++}; ++ ++struct xt_udp { ++ __u16 spts[2]; ++ __u16 dpts[2]; ++ __u8 invflags; ++}; ++ ++struct rtmsg { ++ unsigned char 
rtm_family; ++ unsigned char rtm_dst_len; ++ unsigned char rtm_src_len; ++ unsigned char rtm_tos; ++ unsigned char rtm_table; ++ unsigned char rtm_protocol; ++ unsigned char rtm_scope; ++ unsigned char rtm_type; ++ unsigned int rtm_flags; ++}; ++ ++struct ipv4_addr_key { ++ __be32 addr; ++ int vif; ++}; ++ ++struct inetpeer_addr { ++ union { ++ struct ipv4_addr_key a4; ++ struct in6_addr a6; ++ u32 key[4]; ++ }; ++ __u16 family; ++}; ++ ++struct inet_peer { ++ struct rb_node rb_node; ++ struct inetpeer_addr daddr; ++ u32 metrics[17]; ++ u32 rate_tokens; ++ u32 n_redirects; ++ long unsigned int rate_last; ++ union { ++ struct { ++ atomic_t rid; ++ }; ++ struct callback_head rcu; ++ }; ++ __u32 dtime; ++ refcount_t refcnt; ++}; ++ ++struct uncached_list { ++ spinlock_t lock; ++ struct list_head head; ++}; ++ ++struct rt_cache_stat { ++ unsigned int in_slow_tot; ++ unsigned int in_slow_mc; ++ unsigned int in_no_route; ++ unsigned int in_brd; ++ unsigned int in_martian_dst; ++ unsigned int in_martian_src; ++ unsigned int out_slow_tot; ++ unsigned int out_slow_mc; ++}; ++ ++struct fib_prop { ++ int error; ++ u8 scope; ++}; ++ ++struct raw_hashinfo { ++ rwlock_t lock; ++ struct hlist_head ht[256]; ++}; ++ ++enum { ++ INET_FRAG_FIRST_IN = 1, ++ INET_FRAG_LAST_IN = 2, ++ INET_FRAG_COMPLETE = 4, ++}; ++ ++struct ipq { ++ struct inet_frag_queue q; ++ u8 ecn; ++ u16 max_df_size; ++ int iif; ++ unsigned int rid; ++ struct inet_peer *peer; ++}; ++ ++struct ip_options_data { ++ struct ip_options_rcu opt; ++ char data[40]; ++}; ++ ++struct ipcm_cookie { ++ struct sockcm_cookie sockc; ++ __be32 addr; ++ int oif; ++ struct ip_options_rcu *opt; ++ __u8 ttl; ++ __s16 tos; ++ char priority; ++ __u16 gso_size; ++}; ++ ++struct ip_reply_arg { ++ struct kvec iov[1]; ++ int flags; ++ __wsum csum; ++ int csumoffset; ++ int bound_dev_if; ++ u8 tos; ++ kuid_t uid; ++}; ++ ++struct ip_mreq_source { ++ __be32 imr_multiaddr; ++ __be32 imr_interface; ++ __be32 imr_sourceaddr; ++}; ++ ++struct ip_msfilter { ++ __be32 imsf_multiaddr; ++ __be32 imsf_interface; ++ __u32 imsf_fmode; ++ __u32 imsf_numsrc; ++ __be32 imsf_slist[1]; ++}; ++ ++struct in_pktinfo { ++ int ipi_ifindex; ++ struct in_addr ipi_spec_dst; ++ struct in_addr ipi_addr; ++}; ++ ++struct inet_timewait_sock { ++ struct sock_common __tw_common; ++ __u32 tw_mark; ++ volatile unsigned char tw_substate; ++ unsigned char tw_rcv_wscale; ++ __be16 tw_sport; ++ unsigned int tw_kill: 1; ++ unsigned int tw_transparent: 1; ++ unsigned int tw_flowlabel: 20; ++ unsigned int tw_pad: 2; ++ unsigned int tw_tos: 8; ++ struct timer_list tw_timer; ++ struct inet_bind_bucket *tw_tb; ++}; ++ ++struct tcpvegas_info { ++ __u32 tcpv_enabled; ++ __u32 tcpv_rttcnt; ++ __u32 tcpv_rtt; ++ __u32 tcpv_minrtt; ++}; ++ ++struct tcp_dctcp_info { ++ __u16 dctcp_enabled; ++ __u16 dctcp_ce_state; ++ __u32 dctcp_alpha; ++ __u32 dctcp_ab_ecn; ++ __u32 dctcp_ab_tot; ++}; ++ ++struct tcp_bbr_info { ++ __u32 bbr_bw_lo; ++ __u32 bbr_bw_hi; ++ __u32 bbr_min_rtt; ++ __u32 bbr_pacing_gain; ++ __u32 bbr_cwnd_gain; ++}; ++ ++union tcp_cc_info { ++ struct tcpvegas_info vegas; ++ struct tcp_dctcp_info dctcp; ++ struct tcp_bbr_info bbr; ++}; ++ ++enum { ++ BPF_TCP_ESTABLISHED = 1, ++ BPF_TCP_SYN_SENT = 2, ++ BPF_TCP_SYN_RECV = 3, ++ BPF_TCP_FIN_WAIT1 = 4, ++ BPF_TCP_FIN_WAIT2 = 5, ++ BPF_TCP_TIME_WAIT = 6, ++ BPF_TCP_CLOSE = 7, ++ BPF_TCP_CLOSE_WAIT = 8, ++ BPF_TCP_LAST_ACK = 9, ++ BPF_TCP_LISTEN = 10, ++ BPF_TCP_CLOSING = 11, ++ BPF_TCP_NEW_SYN_RECV = 12, ++ BPF_TCP_MAX_STATES = 13, ++}; ++ ++enum 
inet_csk_ack_state_t { ++ ICSK_ACK_SCHED = 1, ++ ICSK_ACK_TIMER = 2, ++ ICSK_ACK_PUSHED = 4, ++ ICSK_ACK_PUSHED2 = 8, ++ ICSK_ACK_NOW = 16, ++}; ++ ++enum { ++ TCP_FLAG_CWR = 32768, ++ TCP_FLAG_ECE = 16384, ++ TCP_FLAG_URG = 8192, ++ TCP_FLAG_ACK = 4096, ++ TCP_FLAG_PSH = 2048, ++ TCP_FLAG_RST = 1024, ++ TCP_FLAG_SYN = 512, ++ TCP_FLAG_FIN = 256, ++ TCP_RESERVED_BITS = 15, ++ TCP_DATA_OFFSET = 240, ++}; ++ ++struct tcp_repair_opt { ++ __u32 opt_code; ++ __u32 opt_val; ++}; ++ ++struct tcp_repair_window { ++ __u32 snd_wl1; ++ __u32 snd_wnd; ++ __u32 max_window; ++ __u32 rcv_wnd; ++ __u32 rcv_wup; ++}; ++ ++enum { ++ TCP_NO_QUEUE = 0, ++ TCP_RECV_QUEUE = 1, ++ TCP_SEND_QUEUE = 2, ++ TCP_QUEUES_NR = 3, ++}; ++ ++struct tcp_info { ++ __u8 tcpi_state; ++ __u8 tcpi_ca_state; ++ __u8 tcpi_retransmits; ++ __u8 tcpi_probes; ++ __u8 tcpi_backoff; ++ __u8 tcpi_options; ++ __u8 tcpi_snd_wscale: 4; ++ __u8 tcpi_rcv_wscale: 4; ++ __u8 tcpi_delivery_rate_app_limited: 1; ++ __u32 tcpi_rto; ++ __u32 tcpi_ato; ++ __u32 tcpi_snd_mss; ++ __u32 tcpi_rcv_mss; ++ __u32 tcpi_unacked; ++ __u32 tcpi_sacked; ++ __u32 tcpi_lost; ++ __u32 tcpi_retrans; ++ __u32 tcpi_fackets; ++ __u32 tcpi_last_data_sent; ++ __u32 tcpi_last_ack_sent; ++ __u32 tcpi_last_data_recv; ++ __u32 tcpi_last_ack_recv; ++ __u32 tcpi_pmtu; ++ __u32 tcpi_rcv_ssthresh; ++ __u32 tcpi_rtt; ++ __u32 tcpi_rttvar; ++ __u32 tcpi_snd_ssthresh; ++ __u32 tcpi_snd_cwnd; ++ __u32 tcpi_advmss; ++ __u32 tcpi_reordering; ++ __u32 tcpi_rcv_rtt; ++ __u32 tcpi_rcv_space; ++ __u32 tcpi_total_retrans; ++ __u64 tcpi_pacing_rate; ++ __u64 tcpi_max_pacing_rate; ++ __u64 tcpi_bytes_acked; ++ __u64 tcpi_bytes_received; ++ __u32 tcpi_segs_out; ++ __u32 tcpi_segs_in; ++ __u32 tcpi_notsent_bytes; ++ __u32 tcpi_min_rtt; ++ __u32 tcpi_data_segs_in; ++ __u32 tcpi_data_segs_out; ++ __u64 tcpi_delivery_rate; ++ __u64 tcpi_busy_time; ++ __u64 tcpi_rwnd_limited; ++ __u64 tcpi_sndbuf_limited; ++ __u32 tcpi_delivered; ++ __u32 tcpi_delivered_ce; ++ __u64 tcpi_bytes_sent; ++ __u64 tcpi_bytes_retrans; ++ __u32 tcpi_dsack_dups; ++ __u32 tcpi_reord_seen; ++}; ++ ++enum { ++ TCP_NLA_PAD = 0, ++ TCP_NLA_BUSY = 1, ++ TCP_NLA_RWND_LIMITED = 2, ++ TCP_NLA_SNDBUF_LIMITED = 3, ++ TCP_NLA_DATA_SEGS_OUT = 4, ++ TCP_NLA_TOTAL_RETRANS = 5, ++ TCP_NLA_PACING_RATE = 6, ++ TCP_NLA_DELIVERY_RATE = 7, ++ TCP_NLA_SND_CWND = 8, ++ TCP_NLA_REORDERING = 9, ++ TCP_NLA_MIN_RTT = 10, ++ TCP_NLA_RECUR_RETRANS = 11, ++ TCP_NLA_DELIVERY_RATE_APP_LMT = 12, ++ TCP_NLA_SNDQ_SIZE = 13, ++ TCP_NLA_CA_STATE = 14, ++ TCP_NLA_SND_SSTHRESH = 15, ++ TCP_NLA_DELIVERED = 16, ++ TCP_NLA_DELIVERED_CE = 17, ++ TCP_NLA_BYTES_SENT = 18, ++ TCP_NLA_BYTES_RETRANS = 19, ++ TCP_NLA_DSACK_DUPS = 20, ++ TCP_NLA_REORD_SEEN = 21, ++}; ++ ++struct tcp_zerocopy_receive { ++ __u64 address; ++ __u32 length; ++ __u32 recv_skip_hint; ++}; ++ ++struct tcp_md5sig_pool { ++ struct ahash_request *md5_req; ++ void *scratch; ++}; ++ ++enum tcp_chrono { ++ TCP_CHRONO_UNSPEC = 0, ++ TCP_CHRONO_BUSY = 1, ++ TCP_CHRONO_RWND_LIMITED = 2, ++ TCP_CHRONO_SNDBUF_LIMITED = 3, ++ __TCP_CHRONO_MAX = 4, ++}; ++ ++struct tcp_splice_state { ++ struct pipe_inode_info *pipe; ++ size_t len; ++ unsigned int flags; ++}; ++ ++struct tcp_sack_block_wire { ++ __be32 start_seq; ++ __be32 end_seq; ++}; ++ ++enum tcp_queue { ++ TCP_FRAG_IN_WRITE_QUEUE = 0, ++ TCP_FRAG_IN_RTX_QUEUE = 1, ++}; ++ ++enum tcp_ca_ack_event_flags { ++ CA_ACK_SLOWPATH = 1, ++ CA_ACK_WIN_UPDATE = 2, ++ CA_ACK_ECE = 4, ++}; ++ ++struct tcp_sacktag_state { ++ u32 reord; ++ u64 first_sackt; ++ u64 
last_sackt; ++ struct rate_sample *rate; ++ int flag; ++ unsigned int mss_now; ++}; ++ ++enum pkt_hash_types { ++ PKT_HASH_TYPE_NONE = 0, ++ PKT_HASH_TYPE_L2 = 1, ++ PKT_HASH_TYPE_L3 = 2, ++ PKT_HASH_TYPE_L4 = 3, ++}; ++ ++enum tsq_flags { ++ TSQF_THROTTLED = 1, ++ TSQF_QUEUED = 2, ++ TCPF_TSQ_DEFERRED = 4, ++ TCPF_WRITE_TIMER_DEFERRED = 8, ++ TCPF_DELACK_TIMER_DEFERRED = 16, ++ TCPF_MTU_REDUCED_DEFERRED = 32, ++}; ++ ++struct tcp_out_options { ++ u16 options; ++ u16 mss; ++ u8 ws; ++ u8 num_sack_blocks; ++ u8 hash_size; ++ __u8 *hash_location; ++ __u32 tsval; ++ __u32 tsecr; ++ struct tcp_fastopen_cookie *fastopen_cookie; ++}; ++ ++struct tsq_tasklet { ++ struct tasklet_struct tasklet; ++ struct list_head head; ++}; ++ ++struct tcp_md5sig { ++ struct __kernel_sockaddr_storage tcpm_addr; ++ __u8 tcpm_flags; ++ __u8 tcpm_prefixlen; ++ __u16 tcpm_keylen; ++ __u32 __tcpm_pad; ++ __u8 tcpm_key[80]; ++}; ++ ++struct tcp_timewait_sock { ++ struct inet_timewait_sock tw_sk; ++ u32 tw_rcv_wnd; ++ u32 tw_ts_offset; ++ u32 tw_ts_recent; ++ u32 tw_last_oow_ack_time; ++ int tw_ts_recent_stamp; ++ struct tcp_md5sig_key *tw_md5_key; ++}; ++ ++enum tcp_tw_status { ++ TCP_TW_SUCCESS = 0, ++ TCP_TW_RST = 1, ++ TCP_TW_ACK = 2, ++ TCP_TW_SYN = 3, ++}; ++ ++struct tcp4_pseudohdr { ++ __be32 saddr; ++ __be32 daddr; ++ __u8 pad; ++ __u8 protocol; ++ __be16 len; ++}; ++ ++enum tcp_seq_states { ++ TCP_SEQ_STATE_LISTENING = 0, ++ TCP_SEQ_STATE_ESTABLISHED = 1, ++}; ++ ++struct tcp_seq_afinfo { ++ sa_family_t family; ++}; ++ ++struct tcp_iter_state { ++ struct seq_net_private p; ++ enum tcp_seq_states state; ++ struct sock *syn_wait_sk; ++ int bucket; ++ int offset; ++ int sbucket; ++ int num; ++ loff_t last_pos; ++}; ++ ++enum tcp_metric_index { ++ TCP_METRIC_RTT = 0, ++ TCP_METRIC_RTTVAR = 1, ++ TCP_METRIC_SSTHRESH = 2, ++ TCP_METRIC_CWND = 3, ++ TCP_METRIC_REORDERING = 4, ++ TCP_METRIC_RTT_US = 5, ++ TCP_METRIC_RTTVAR_US = 6, ++ __TCP_METRIC_MAX = 7, ++}; ++ ++enum { ++ TCP_METRICS_ATTR_UNSPEC = 0, ++ TCP_METRICS_ATTR_ADDR_IPV4 = 1, ++ TCP_METRICS_ATTR_ADDR_IPV6 = 2, ++ TCP_METRICS_ATTR_AGE = 3, ++ TCP_METRICS_ATTR_TW_TSVAL = 4, ++ TCP_METRICS_ATTR_TW_TS_STAMP = 5, ++ TCP_METRICS_ATTR_VALS = 6, ++ TCP_METRICS_ATTR_FOPEN_MSS = 7, ++ TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, ++ TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, ++ TCP_METRICS_ATTR_FOPEN_COOKIE = 10, ++ TCP_METRICS_ATTR_SADDR_IPV4 = 11, ++ TCP_METRICS_ATTR_SADDR_IPV6 = 12, ++ TCP_METRICS_ATTR_PAD = 13, ++ __TCP_METRICS_ATTR_MAX = 14, ++}; ++ ++enum { ++ TCP_METRICS_CMD_UNSPEC = 0, ++ TCP_METRICS_CMD_GET = 1, ++ TCP_METRICS_CMD_DEL = 2, ++ __TCP_METRICS_CMD_MAX = 3, ++}; ++ ++struct tcp_fastopen_metrics { ++ u16 mss; ++ u16 syn_loss: 10; ++ u16 try_exp: 2; ++ long unsigned int last_syn_loss; ++ struct tcp_fastopen_cookie cookie; ++}; ++ ++struct tcp_metrics_block { ++ struct tcp_metrics_block *tcpm_next; ++ possible_net_t tcpm_net; ++ struct inetpeer_addr tcpm_saddr; ++ struct inetpeer_addr tcpm_daddr; ++ long unsigned int tcpm_stamp; ++ u32 tcpm_lock; ++ u32 tcpm_vals[5]; ++ struct tcp_fastopen_metrics tcpm_fastopen; ++ struct callback_head callback_head; ++}; ++ ++struct tcpm_hash_bucket { ++ struct tcp_metrics_block *chain; ++}; ++ ++struct icmp_filter { ++ __u32 data; ++}; ++ ++struct raw_iter_state { ++ struct seq_net_private p; ++ int bucket; ++}; ++ ++struct raw_sock { ++ struct inet_sock inet; ++ struct icmp_filter filter; ++ u32 ipmr_table; ++}; ++ ++struct raw_frag_vec { ++ struct msghdr *msg; ++ union { ++ struct icmphdr icmph; ++ char c[1]; ++ } hdr; ++ 
int hlen; ++}; ++ ++struct udp_skb_cb { ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ } header; ++ __u16 cscov; ++ __u8 partial_cov; ++}; ++ ++struct udp_dev_scratch { ++ u32 _tsize_state; ++ u16 len; ++ bool is_linear; ++ bool csum_unnecessary; ++}; ++ ++struct udp_seq_afinfo { ++ sa_family_t family; ++ struct udp_table *udp_table; ++}; ++ ++struct udp_iter_state { ++ struct seq_net_private p; ++ int bucket; ++}; ++ ++struct inet_protosw { ++ struct list_head list; ++ short unsigned int type; ++ short unsigned int protocol; ++ struct proto *prot; ++ const struct proto_ops *ops; ++ unsigned char flags; ++}; ++ ++typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); ++ ++typedef struct sock * (*udp_lookup_t)(struct sk_buff *, __be16, __be16); ++ ++struct arpreq { ++ struct sockaddr arp_pa; ++ struct sockaddr arp_ha; ++ int arp_flags; ++ struct sockaddr arp_netmask; ++ char arp_dev[16]; ++}; ++ ++typedef struct { ++ char ax25_call[7]; ++} ax25_address; ++ ++enum { ++ AX25_VALUES_IPDEFMODE = 0, ++ AX25_VALUES_AXDEFMODE = 1, ++ AX25_VALUES_BACKOFF = 2, ++ AX25_VALUES_CONMODE = 3, ++ AX25_VALUES_WINDOW = 4, ++ AX25_VALUES_EWINDOW = 5, ++ AX25_VALUES_T1 = 6, ++ AX25_VALUES_T2 = 7, ++ AX25_VALUES_T3 = 8, ++ AX25_VALUES_IDLE = 9, ++ AX25_VALUES_N2 = 10, ++ AX25_VALUES_PACLEN = 11, ++ AX25_VALUES_PROTOCOL = 12, ++ AX25_VALUES_DS_TIMEOUT = 13, ++ AX25_MAX_VALUES = 14, ++}; ++ ++struct ax25_dev { ++ struct ax25_dev *next; ++ struct net_device *dev; ++ struct net_device *forward; ++ struct ctl_table_header *sysheader; ++ int values[14]; ++}; ++ ++typedef struct ax25_dev ax25_dev; ++ ++enum { ++ XFRM_LOOKUP_ICMP = 1, ++ XFRM_LOOKUP_QUEUE = 2, ++ XFRM_LOOKUP_KEEP_DST_REF = 4, ++}; ++ ++struct pingv6_ops { ++ int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); ++ void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); ++ void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); ++ int (*icmpv6_err_convert)(u8, u8, int *); ++ void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); ++ int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); ++}; ++ ++struct icmp_bxm { ++ struct sk_buff *skb; ++ int offset; ++ int data_len; ++ struct { ++ struct icmphdr icmph; ++ __be32 times[3]; ++ } data; ++ int head_len; ++ struct ip_options_data replyopts; ++}; ++ ++struct icmp_control { ++ bool (*handler)(struct sk_buff *); ++ short int error; ++}; ++ ++struct ifaddrmsg { ++ __u8 ifa_family; ++ __u8 ifa_prefixlen; ++ __u8 ifa_flags; ++ __u8 ifa_scope; ++ __u32 ifa_index; ++}; ++ ++enum { ++ IFA_UNSPEC = 0, ++ IFA_ADDRESS = 1, ++ IFA_LOCAL = 2, ++ IFA_LABEL = 3, ++ IFA_BROADCAST = 4, ++ IFA_ANYCAST = 5, ++ IFA_CACHEINFO = 6, ++ IFA_MULTICAST = 7, ++ IFA_FLAGS = 8, ++ IFA_RT_PRIORITY = 9, ++ __IFA_MAX = 10, ++}; ++ ++struct ifa_cacheinfo { ++ __u32 ifa_prefered; ++ __u32 ifa_valid; ++ __u32 cstamp; ++ __u32 tstamp; ++}; ++ ++enum { ++ IFLA_INET_UNSPEC = 0, ++ IFLA_INET_CONF = 1, ++ __IFLA_INET_MAX = 2, ++}; ++ ++struct in_validator_info { ++ __be32 ivi_addr; ++ struct in_device *ivi_dev; ++ struct netlink_ext_ack *extack; ++}; ++ ++struct netconfmsg { ++ __u8 ncm_family; ++}; ++ ++enum { ++ NETCONFA_UNSPEC = 0, ++ NETCONFA_IFINDEX = 1, ++ NETCONFA_FORWARDING = 2, ++ NETCONFA_RP_FILTER = 3, ++ NETCONFA_MC_FORWARDING = 4, ++ NETCONFA_PROXY_NEIGH = 5, ++ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, ++ NETCONFA_INPUT = 
7, ++ NETCONFA_BC_FORWARDING = 8, ++ __NETCONFA_MAX = 9, ++}; ++ ++struct devinet_sysctl_table { ++ struct ctl_table_header *sysctl_header; ++ struct ctl_table devinet_vars[33]; ++}; ++ ++struct igmphdr { ++ __u8 type; ++ __u8 code; ++ __sum16 csum; ++ __be32 group; ++}; ++ ++struct igmpv3_grec { ++ __u8 grec_type; ++ __u8 grec_auxwords; ++ __be16 grec_nsrcs; ++ __be32 grec_mca; ++ __be32 grec_src[0]; ++}; ++ ++struct igmpv3_report { ++ __u8 type; ++ __u8 resv1; ++ __sum16 csum; ++ __be16 resv2; ++ __be16 ngrec; ++ struct igmpv3_grec grec[0]; ++}; ++ ++struct igmpv3_query { ++ __u8 type; ++ __u8 code; ++ __sum16 csum; ++ __be32 group; ++ __u8 qrv: 3; ++ __u8 suppress: 1; ++ __u8 resv: 4; ++ __u8 qqic; ++ __be16 nsrcs; ++ __be32 srcs[0]; ++}; ++ ++struct igmp_mc_iter_state { ++ struct seq_net_private p; ++ struct net_device *dev; ++ struct in_device *in_dev; ++}; ++ ++struct igmp_mcf_iter_state { ++ struct seq_net_private p; ++ struct net_device *dev; ++ struct in_device *idev; ++ struct ip_mc_list *im; ++}; ++ ++struct nl_info { ++ struct nlmsghdr *nlh; ++ struct net *nl_net; ++ u32 portid; ++ bool skip_notify; ++}; ++ ++struct fib_config { ++ u8 fc_dst_len; ++ u8 fc_tos; ++ u8 fc_protocol; ++ u8 fc_scope; ++ u8 fc_type; ++ u32 fc_table; ++ __be32 fc_dst; ++ __be32 fc_gw; ++ int fc_oif; ++ u32 fc_flags; ++ u32 fc_priority; ++ __be32 fc_prefsrc; ++ struct nlattr *fc_mx; ++ struct rtnexthop *fc_mp; ++ int fc_mx_len; ++ int fc_mp_len; ++ u32 fc_flow; ++ u32 fc_nlflags; ++ struct nl_info fc_nlinfo; ++ struct nlattr *fc_encap; ++ u16 fc_encap_type; ++}; ++ ++struct fib_result_nl { ++ __be32 fl_addr; ++ u32 fl_mark; ++ unsigned char fl_tos; ++ unsigned char fl_scope; ++ unsigned char tb_id_in; ++ unsigned char tb_id; ++ unsigned char prefixlen; ++ unsigned char nh_sel; ++ unsigned char type; ++ unsigned char scope; ++ int err; ++}; ++ ++struct fib_nh_notifier_info { ++ struct fib_notifier_info info; ++ struct fib_nh *fib_nh; ++}; ++ ++struct fib_alias { ++ struct hlist_node fa_list; ++ struct fib_info *fa_info; ++ u8 fa_tos; ++ u8 fa_type; ++ u8 fa_state; ++ u8 fa_slen; ++ u32 tb_id; ++ s16 fa_default; ++ struct callback_head rcu; ++}; ++ ++struct fib_entry_notifier_info { ++ struct fib_notifier_info info; ++ u32 dst; ++ int dst_len; ++ struct fib_info *fi; ++ u8 tos; ++ u8 type; ++ u32 tb_id; ++}; ++ ++typedef unsigned int t_key; ++ ++struct key_vector { ++ t_key key; ++ unsigned char pos; ++ unsigned char bits; ++ unsigned char slen; ++ union { ++ struct hlist_head leaf; ++ struct key_vector *tnode[0]; ++ }; ++}; ++ ++struct tnode { ++ struct callback_head rcu; ++ t_key empty_children; ++ t_key full_children; ++ struct key_vector *parent; ++ struct key_vector kv[1]; ++}; ++ ++struct trie_use_stats { ++ unsigned int gets; ++ unsigned int backtrack; ++ unsigned int semantic_match_passed; ++ unsigned int semantic_match_miss; ++ unsigned int null_node_hit; ++ unsigned int resize_node_skipped; ++}; ++ ++struct trie_stat { ++ unsigned int totdepth; ++ unsigned int maxdepth; ++ unsigned int tnodes; ++ unsigned int leaves; ++ unsigned int nullpointers; ++ unsigned int prefixes; ++ unsigned int nodesizes[32]; ++}; ++ ++struct trie { ++ struct key_vector kv[1]; ++ struct trie_use_stats *stats; ++}; ++ ++struct fib_trie_iter { ++ struct seq_net_private p; ++ struct fib_table *tb; ++ struct key_vector *tnode; ++ unsigned int index; ++ unsigned int depth; ++}; ++ ++struct fib_route_iter { ++ struct seq_net_private p; ++ struct fib_table *main_tb; ++ struct key_vector *tnode; ++ loff_t pos; ++ t_key key; 
++}; ++ ++struct ipfrag_skb_cb { ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ }; ++ struct sk_buff *next_frag; ++ int frag_run_len; ++}; ++ ++struct ping_iter_state { ++ struct seq_net_private p; ++ int bucket; ++ sa_family_t family; ++}; ++ ++struct pingfakehdr { ++ struct icmphdr icmph; ++ struct msghdr *msg; ++ sa_family_t family; ++ __wsum wcheck; ++}; ++ ++struct ping_table { ++ struct hlist_nulls_head hash[64]; ++ rwlock_t lock; ++}; ++ ++enum lwtunnel_ip_t { ++ LWTUNNEL_IP_UNSPEC = 0, ++ LWTUNNEL_IP_ID = 1, ++ LWTUNNEL_IP_DST = 2, ++ LWTUNNEL_IP_SRC = 3, ++ LWTUNNEL_IP_TTL = 4, ++ LWTUNNEL_IP_TOS = 5, ++ LWTUNNEL_IP_FLAGS = 6, ++ LWTUNNEL_IP_PAD = 7, ++ __LWTUNNEL_IP_MAX = 8, ++}; ++ ++enum lwtunnel_ip6_t { ++ LWTUNNEL_IP6_UNSPEC = 0, ++ LWTUNNEL_IP6_ID = 1, ++ LWTUNNEL_IP6_DST = 2, ++ LWTUNNEL_IP6_SRC = 3, ++ LWTUNNEL_IP6_HOPLIMIT = 4, ++ LWTUNNEL_IP6_TC = 5, ++ LWTUNNEL_IP6_FLAGS = 6, ++ LWTUNNEL_IP6_PAD = 7, ++ __LWTUNNEL_IP6_MAX = 8, ++}; ++ ++struct ip6_tnl_encap_ops { ++ size_t (*encap_hlen)(struct ip_tunnel_encap *); ++ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); ++}; ++ ++struct snmp_mib { ++ const char *name; ++ int entry; ++}; ++ ++struct fib4_rule { ++ struct fib_rule common; ++ u8 dst_len; ++ u8 src_len; ++ u8 tos; ++ __be32 src; ++ __be32 srcmask; ++ __be32 dst; ++ __be32 dstmask; ++ u32 tclassid; ++}; ++ ++enum { ++ PIM_TYPE_HELLO = 0, ++ PIM_TYPE_REGISTER = 1, ++ PIM_TYPE_REGISTER_STOP = 2, ++ PIM_TYPE_JOIN_PRUNE = 3, ++ PIM_TYPE_BOOTSTRAP = 4, ++ PIM_TYPE_ASSERT = 5, ++ PIM_TYPE_GRAFT = 6, ++ PIM_TYPE_GRAFT_ACK = 7, ++ PIM_TYPE_CANDIDATE_RP_ADV = 8, ++}; ++ ++struct pimreghdr { ++ __u8 type; ++ __u8 reserved; ++ __be16 csum; ++ __be32 flags; ++}; ++ ++typedef short unsigned int vifi_t; ++ ++struct vifctl { ++ vifi_t vifc_vifi; ++ unsigned char vifc_flags; ++ unsigned char vifc_threshold; ++ unsigned int vifc_rate_limit; ++ union { ++ struct in_addr vifc_lcl_addr; ++ int vifc_lcl_ifindex; ++ }; ++ struct in_addr vifc_rmt_addr; ++}; ++ ++struct mfcctl { ++ struct in_addr mfcc_origin; ++ struct in_addr mfcc_mcastgrp; ++ vifi_t mfcc_parent; ++ unsigned char mfcc_ttls[32]; ++ unsigned int mfcc_pkt_cnt; ++ unsigned int mfcc_byte_cnt; ++ unsigned int mfcc_wrong_if; ++ int mfcc_expire; ++}; ++ ++struct sioc_sg_req { ++ struct in_addr src; ++ struct in_addr grp; ++ long unsigned int pktcnt; ++ long unsigned int bytecnt; ++ long unsigned int wrong_if; ++}; ++ ++struct sioc_vif_req { ++ vifi_t vifi; ++ long unsigned int icount; ++ long unsigned int ocount; ++ long unsigned int ibytes; ++ long unsigned int obytes; ++}; ++ ++struct igmpmsg { ++ __u32 unused1; ++ __u32 unused2; ++ unsigned char im_msgtype; ++ unsigned char im_mbz; ++ unsigned char im_vif; ++ unsigned char unused3; ++ struct in_addr im_src; ++ struct in_addr im_dst; ++}; ++ ++enum { ++ IPMRA_TABLE_UNSPEC = 0, ++ IPMRA_TABLE_ID = 1, ++ IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2, ++ IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3, ++ IPMRA_TABLE_MROUTE_DO_ASSERT = 4, ++ IPMRA_TABLE_MROUTE_DO_PIM = 5, ++ IPMRA_TABLE_VIFS = 6, ++ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7, ++ __IPMRA_TABLE_MAX = 8, ++}; ++ ++enum { ++ IPMRA_VIF_UNSPEC = 0, ++ IPMRA_VIF = 1, ++ __IPMRA_VIF_MAX = 2, ++}; ++ ++enum { ++ IPMRA_VIFA_UNSPEC = 0, ++ IPMRA_VIFA_IFINDEX = 1, ++ IPMRA_VIFA_VIF_ID = 2, ++ IPMRA_VIFA_FLAGS = 3, ++ IPMRA_VIFA_BYTES_IN = 4, ++ IPMRA_VIFA_BYTES_OUT = 5, ++ IPMRA_VIFA_PACKETS_IN = 6, ++ IPMRA_VIFA_PACKETS_OUT = 7, ++ IPMRA_VIFA_LOCAL_ADDR = 8, ++ IPMRA_VIFA_REMOTE_ADDR = 9, 
++ IPMRA_VIFA_PAD = 10, ++ __IPMRA_VIFA_MAX = 11, ++}; ++ ++enum { ++ IPMRA_CREPORT_UNSPEC = 0, ++ IPMRA_CREPORT_MSGTYPE = 1, ++ IPMRA_CREPORT_VIF_ID = 2, ++ IPMRA_CREPORT_SRC_ADDR = 3, ++ IPMRA_CREPORT_DST_ADDR = 4, ++ IPMRA_CREPORT_PKT = 5, ++ __IPMRA_CREPORT_MAX = 6, ++}; ++ ++struct vif_device { ++ struct net_device *dev; ++ long unsigned int bytes_in; ++ long unsigned int bytes_out; ++ long unsigned int pkt_in; ++ long unsigned int pkt_out; ++ long unsigned int rate_limit; ++ unsigned char threshold; ++ short unsigned int flags; ++ int link; ++ struct netdev_phys_item_id dev_parent_id; ++ __be32 local; ++ __be32 remote; ++}; ++ ++struct vif_entry_notifier_info { ++ struct fib_notifier_info info; ++ struct net_device *dev; ++ short unsigned int vif_index; ++ short unsigned int vif_flags; ++ u32 tb_id; ++}; ++ ++enum { ++ MFC_STATIC = 1, ++ MFC_OFFLOAD = 2, ++}; ++ ++struct mr_mfc { ++ struct rhlist_head mnode; ++ short unsigned int mfc_parent; ++ int mfc_flags; ++ union { ++ struct { ++ long unsigned int expires; ++ struct sk_buff_head unresolved; ++ } unres; ++ struct { ++ long unsigned int last_assert; ++ int minvif; ++ int maxvif; ++ long unsigned int bytes; ++ long unsigned int pkt; ++ long unsigned int wrong_if; ++ long unsigned int lastuse; ++ unsigned char ttls[32]; ++ refcount_t refcount; ++ } res; ++ } mfc_un; ++ struct list_head list; ++ struct callback_head rcu; ++ void (*free)(struct callback_head *); ++}; ++ ++struct mfc_entry_notifier_info { ++ struct fib_notifier_info info; ++ struct mr_mfc *mfc; ++ u32 tb_id; ++}; ++ ++struct mr_table_ops { ++ const struct rhashtable_params *rht_params; ++ void *cmparg_any; ++}; ++ ++struct mr_table { ++ struct list_head list; ++ possible_net_t net; ++ struct mr_table_ops ops; ++ u32 id; ++ struct sock *mroute_sk; ++ struct timer_list ipmr_expire_timer; ++ struct list_head mfc_unres_queue; ++ struct vif_device vif_table[32]; ++ struct rhltable mfc_hash; ++ struct list_head mfc_cache_list; ++ int maxvif; ++ atomic_t cache_resolve_queue_len; ++ bool mroute_do_assert; ++ bool mroute_do_pim; ++ bool mroute_do_wrvifwhole; ++ int mroute_reg_vif_num; ++}; ++ ++struct mr_vif_iter { ++ struct seq_net_private p; ++ struct mr_table *mrt; ++ int ct; ++}; ++ ++struct mr_mfc_iter { ++ struct seq_net_private p; ++ struct mr_table *mrt; ++ struct list_head *cache; ++ spinlock_t *lock; ++}; ++ ++struct mfc_cache_cmp_arg { ++ __be32 mfc_mcastgrp; ++ __be32 mfc_origin; ++}; ++ ++struct mfc_cache { ++ struct mr_mfc _c; ++ union { ++ struct { ++ __be32 mfc_mcastgrp; ++ __be32 mfc_origin; ++ }; ++ struct mfc_cache_cmp_arg cmparg; ++ }; ++}; ++ ++struct ip_tunnel_parm { ++ char name[16]; ++ int link; ++ __be16 i_flags; ++ __be16 o_flags; ++ __be32 i_key; ++ __be32 o_key; ++ struct iphdr iph; ++}; ++ ++struct ipmr_result { ++ struct mr_table *mrt; ++}; ++ ++struct compat_sioc_sg_req { ++ struct in_addr src; ++ struct in_addr grp; ++ compat_ulong_t pktcnt; ++ compat_ulong_t bytecnt; ++ compat_ulong_t wrong_if; ++}; ++ ++struct compat_sioc_vif_req { ++ vifi_t vifi; ++ compat_ulong_t icount; ++ compat_ulong_t ocount; ++ compat_ulong_t ibytes; ++ compat_ulong_t obytes; ++}; ++ ++struct rta_mfc_stats { ++ __u64 mfcs_packets; ++ __u64 mfcs_bytes; ++ __u64 mfcs_wrong_if; ++}; ++ ++struct bictcp { ++ u32 cnt; ++ u32 last_max_cwnd; ++ u32 last_cwnd; ++ u32 last_time; ++ u32 bic_origin_point; ++ u32 bic_K; ++ u32 delay_min; ++ u32 epoch_start; ++ u32 ack_cnt; ++ u32 tcp_cwnd; ++ u16 unused; ++ u8 sample_cnt; ++ u8 found; ++ u32 round_start; ++ u32 end_seq; ++ u32 
last_ack; ++ u32 curr_rtt; ++}; ++ ++struct netlbl_audit { ++ u32 secid; ++ kuid_t loginuid; ++ unsigned int sessionid; ++}; ++ ++struct cipso_v4_std_map_tbl { ++ struct { ++ u32 *cipso; ++ u32 *local; ++ u32 cipso_size; ++ u32 local_size; ++ } lvl; ++ struct { ++ u32 *cipso; ++ u32 *local; ++ u32 cipso_size; ++ u32 local_size; ++ } cat; ++}; ++ ++struct cipso_v4_doi { ++ u32 doi; ++ u32 type; ++ union { ++ struct cipso_v4_std_map_tbl *std; ++ } map; ++ u8 tags[5]; ++ refcount_t refcount; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct cipso_v4_map_cache_bkt { ++ spinlock_t lock; ++ u32 size; ++ struct list_head list; ++}; ++ ++struct cipso_v4_map_cache_entry { ++ u32 hash; ++ unsigned char *key; ++ size_t key_len; ++ struct netlbl_lsm_cache *lsm_data; ++ u32 activity; ++ struct list_head list; ++}; ++ ++struct xfrm_policy_afinfo { ++ struct dst_ops *dst_ops; ++ struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32); ++ int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32); ++ void (*decode_session)(struct sk_buff *, struct flowi *, int); ++ int (*get_tos)(const struct flowi *); ++ int (*init_path)(struct xfrm_dst *, struct dst_entry *, int); ++ int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *); ++ struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *); ++}; ++ ++struct ip_tunnel; ++ ++struct ip6_tnl; ++ ++struct xfrm_tunnel_skb_cb { ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ } header; ++ union { ++ struct ip_tunnel *ip4; ++ struct ip6_tnl *ip6; ++ } tunnel; ++}; ++ ++struct xfrm_mode_skb_cb { ++ struct xfrm_tunnel_skb_cb header; ++ __be16 id; ++ __be16 frag_off; ++ u8 ihl; ++ u8 tos; ++ u8 ttl; ++ u8 protocol; ++ u8 optlen; ++ u8 flow_lbl[3]; ++}; ++ ++struct xfrm_spi_skb_cb { ++ struct xfrm_tunnel_skb_cb header; ++ unsigned int daddroff; ++ unsigned int family; ++ __be32 seq; ++}; ++ ++struct xfrm_input_afinfo { ++ unsigned int family; ++ int (*callback)(struct sk_buff *, u8, int); ++}; ++ ++struct xfrm4_protocol { ++ int (*handler)(struct sk_buff *); ++ int (*input_handler)(struct sk_buff *, int, __be32, int); ++ int (*cb_handler)(struct sk_buff *, int); ++ int (*err_handler)(struct sk_buff *, u32); ++ struct xfrm4_protocol *next; ++ int priority; ++}; ++ ++enum { ++ XFRM_STATE_VOID = 0, ++ XFRM_STATE_ACQ = 1, ++ XFRM_STATE_VALID = 2, ++ XFRM_STATE_ERROR = 3, ++ XFRM_STATE_EXPIRED = 4, ++ XFRM_STATE_DEAD = 5, ++}; ++ ++struct xfrm_if; ++ ++struct xfrm_if_cb { ++ struct xfrm_if * (*decode_session)(struct sk_buff *, short unsigned int); ++}; ++ ++struct xfrm_if_parms { ++ int link; ++ u32 if_id; ++}; ++ ++struct xfrm_if { ++ struct xfrm_if *next; ++ struct net_device *dev; ++ struct net *net; ++ struct xfrm_if_parms p; ++ struct gro_cells gro_cells; ++}; ++ ++struct xfrm_policy_walk { ++ struct xfrm_policy_walk_entry walk; ++ u8 type; ++ u32 seq; ++}; ++ ++struct xfrm_kmaddress { ++ xfrm_address_t local; ++ xfrm_address_t remote; ++ u32 reserved; ++ u16 family; ++}; ++ ++struct xfrm_migrate { ++ xfrm_address_t old_daddr; ++ xfrm_address_t old_saddr; ++ xfrm_address_t new_daddr; ++ xfrm_address_t new_saddr; ++ u8 proto; ++ u8 mode; ++ u16 reserved; ++ u32 reqid; ++ u16 old_family; ++ u16 new_family; ++}; ++ ++struct xfrmk_spdinfo { ++ u32 incnt; ++ u32 outcnt; ++ u32 fwdcnt; ++ u32 inscnt; ++ u32 outscnt; ++ u32 fwdscnt; ++ u32 spdhcnt; ++ u32 spdhmcnt; ++}; ++ ++struct xfrm_flo { ++ struct dst_entry *dst_orig; ++ u8 
flags; ++}; ++ ++enum xfrm_ae_ftype_t { ++ XFRM_AE_UNSPEC = 0, ++ XFRM_AE_RTHR = 1, ++ XFRM_AE_RVAL = 2, ++ XFRM_AE_LVAL = 4, ++ XFRM_AE_ETHR = 8, ++ XFRM_AE_CR = 16, ++ XFRM_AE_CE = 32, ++ XFRM_AE_CU = 64, ++ __XFRM_AE_MAX = 65, ++}; ++ ++enum xfrm_attr_type_t { ++ XFRMA_UNSPEC = 0, ++ XFRMA_ALG_AUTH = 1, ++ XFRMA_ALG_CRYPT = 2, ++ XFRMA_ALG_COMP = 3, ++ XFRMA_ENCAP = 4, ++ XFRMA_TMPL = 5, ++ XFRMA_SA = 6, ++ XFRMA_POLICY = 7, ++ XFRMA_SEC_CTX = 8, ++ XFRMA_LTIME_VAL = 9, ++ XFRMA_REPLAY_VAL = 10, ++ XFRMA_REPLAY_THRESH = 11, ++ XFRMA_ETIMER_THRESH = 12, ++ XFRMA_SRCADDR = 13, ++ XFRMA_COADDR = 14, ++ XFRMA_LASTUSED = 15, ++ XFRMA_POLICY_TYPE = 16, ++ XFRMA_MIGRATE = 17, ++ XFRMA_ALG_AEAD = 18, ++ XFRMA_KMADDRESS = 19, ++ XFRMA_ALG_AUTH_TRUNC = 20, ++ XFRMA_MARK = 21, ++ XFRMA_TFCPAD = 22, ++ XFRMA_REPLAY_ESN_VAL = 23, ++ XFRMA_SA_EXTRA_FLAGS = 24, ++ XFRMA_PROTO = 25, ++ XFRMA_ADDRESS_FILTER = 26, ++ XFRMA_PAD = 27, ++ XFRMA_OFFLOAD_DEV = 28, ++ XFRMA_SET_MARK = 29, ++ XFRMA_SET_MARK_MASK = 30, ++ XFRMA_IF_ID = 31, ++ __XFRMA_MAX = 32, ++}; ++ ++enum xfrm_nlgroups { ++ XFRMNLGRP_NONE = 0, ++ XFRMNLGRP_ACQUIRE = 1, ++ XFRMNLGRP_EXPIRE = 2, ++ XFRMNLGRP_SA = 3, ++ XFRMNLGRP_POLICY = 4, ++ XFRMNLGRP_AEVENTS = 5, ++ XFRMNLGRP_REPORT = 6, ++ XFRMNLGRP_MIGRATE = 7, ++ XFRMNLGRP_MAPPING = 8, ++ __XFRMNLGRP_MAX = 9, ++}; ++ ++struct km_event { ++ union { ++ u32 hard; ++ u32 proto; ++ u32 byid; ++ u32 aevent; ++ u32 type; ++ } data; ++ u32 seq; ++ u32 portid; ++ u32 event; ++ struct net *net; ++}; ++ ++enum { ++ XFRM_MODE_FLAG_TUNNEL = 1, ++}; ++ ++struct xfrm_mgr { ++ struct list_head list; ++ int (*notify)(struct xfrm_state *, const struct km_event *); ++ int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *); ++ struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *); ++ int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16); ++ int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *); ++ int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *); ++ int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *); ++ bool (*is_alive)(const struct km_event *); ++}; ++ ++struct xfrmk_sadinfo { ++ u32 sadhcnt; ++ u32 sadhmcnt; ++ u32 sadcnt; ++}; ++ ++struct ip_tunnel_6rd_parm { ++ struct in6_addr prefix; ++ __be32 relay_prefix; ++ u16 prefixlen; ++ u16 relay_prefixlen; ++}; ++ ++struct ip_tunnel_prl_entry; ++ ++struct ip_tunnel { ++ struct ip_tunnel *next; ++ struct hlist_node hash_node; ++ struct net_device *dev; ++ struct net *net; ++ long unsigned int err_time; ++ int err_count; ++ u32 i_seqno; ++ u32 o_seqno; ++ int tun_hlen; ++ u32 index; ++ u8 erspan_ver; ++ u8 dir; ++ u16 hwid; ++ struct dst_cache dst_cache; ++ struct ip_tunnel_parm parms; ++ int mlink; ++ int encap_hlen; ++ int hlen; ++ struct ip_tunnel_encap encap; ++ struct ip_tunnel_6rd_parm ip6rd; ++ struct ip_tunnel_prl_entry *prl; ++ unsigned int prl_count; ++ unsigned int ip_tnl_net_id; ++ struct gro_cells gro_cells; ++ __u32 fwmark; ++ bool collect_md; ++ bool ignore_df; ++}; ++ ++struct __ip6_tnl_parm { ++ char name[16]; ++ int link; ++ __u8 proto; ++ __u8 encap_limit; ++ __u8 hop_limit; ++ bool collect_md; ++ __be32 flowinfo; ++ __u32 flags; ++ struct in6_addr laddr; ++ struct in6_addr raddr; ++ __be16 i_flags; ++ __be16 o_flags; ++ __be32 i_key; ++ __be32 o_key; ++ __u32 fwmark; ++ __u32 index; ++ __u8 erspan_ver; ++ __u8 dir; ++ __u16 hwid; ++}; ++ ++struct 
ip6_tnl { ++ struct ip6_tnl *next; ++ struct net_device *dev; ++ struct net *net; ++ struct __ip6_tnl_parm parms; ++ struct flowi fl; ++ struct dst_cache dst_cache; ++ struct gro_cells gro_cells; ++ int err_count; ++ long unsigned int err_time; ++ __u32 i_seqno; ++ __u32 o_seqno; ++ int hlen; ++ int tun_hlen; ++ int encap_hlen; ++ struct ip_tunnel_encap encap; ++ int mlink; ++}; ++ ++struct xfrm_skb_cb { ++ struct xfrm_tunnel_skb_cb header; ++ union { ++ struct { ++ __u32 low; ++ __u32 hi; ++ } output; ++ struct { ++ __be32 low; ++ __be32 hi; ++ } input; ++ } seq; ++}; ++ ++struct ip_tunnel_prl_entry { ++ struct ip_tunnel_prl_entry *next; ++ __be32 addr; ++ u16 flags; ++ struct callback_head callback_head; ++}; ++ ++struct xfrm_trans_tasklet { ++ struct tasklet_struct tasklet; ++ struct sk_buff_head queue; ++}; ++ ++struct xfrm_trans_cb { ++ union { ++ struct inet_skb_parm h4; ++ struct inet6_skb_parm h6; ++ } header; ++ int (*finish)(struct net *, struct sock *, struct sk_buff *); ++}; ++ ++struct xfrm_user_offload { ++ int ifindex; ++ __u8 flags; ++}; ++ ++struct sadb_alg { ++ __u8 sadb_alg_id; ++ __u8 sadb_alg_ivlen; ++ __u16 sadb_alg_minbits; ++ __u16 sadb_alg_maxbits; ++ __u16 sadb_alg_reserved; ++}; ++ ++struct xfrm_algo_aead_info { ++ char *geniv; ++ u16 icv_truncbits; ++}; ++ ++struct xfrm_algo_auth_info { ++ u16 icv_truncbits; ++ u16 icv_fullbits; ++}; ++ ++struct xfrm_algo_encr_info { ++ char *geniv; ++ u16 blockbits; ++ u16 defkeybits; ++}; ++ ++struct xfrm_algo_comp_info { ++ u16 threshold; ++}; ++ ++struct xfrm_algo_desc { ++ char *name; ++ char *compat; ++ u8 available: 1; ++ u8 pfkey_supported: 1; ++ union { ++ struct xfrm_algo_aead_info aead; ++ struct xfrm_algo_auth_info auth; ++ struct xfrm_algo_encr_info encr; ++ struct xfrm_algo_comp_info comp; ++ } uinfo; ++ struct sadb_alg desc; ++}; ++ ++struct xfrm_algo_list { ++ struct xfrm_algo_desc *algs; ++ int entries; ++ u32 type; ++ u32 mask; ++}; ++ ++struct xfrm_aead_name { ++ const char *name; ++ int icvbits; ++}; ++ ++enum { ++ XFRM_SHARE_ANY = 0, ++ XFRM_SHARE_SESSION = 1, ++ XFRM_SHARE_USER = 2, ++ XFRM_SHARE_UNIQUE = 3, ++}; ++ ++struct xfrm_user_tmpl { ++ struct xfrm_id id; ++ __u16 family; ++ xfrm_address_t saddr; ++ __u32 reqid; ++ __u8 mode; ++ __u8 share; ++ __u8 optional; ++ __u32 aalgos; ++ __u32 ealgos; ++ __u32 calgos; ++}; ++ ++struct xfrm_userpolicy_type { ++ __u8 type; ++ __u16 reserved1; ++ __u8 reserved2; ++}; ++ ++enum xfrm_sadattr_type_t { ++ XFRMA_SAD_UNSPEC = 0, ++ XFRMA_SAD_CNT = 1, ++ XFRMA_SAD_HINFO = 2, ++ __XFRMA_SAD_MAX = 3, ++}; ++ ++struct xfrmu_sadhinfo { ++ __u32 sadhcnt; ++ __u32 sadhmcnt; ++}; ++ ++enum xfrm_spdattr_type_t { ++ XFRMA_SPD_UNSPEC = 0, ++ XFRMA_SPD_INFO = 1, ++ XFRMA_SPD_HINFO = 2, ++ XFRMA_SPD_IPV4_HTHRESH = 3, ++ XFRMA_SPD_IPV6_HTHRESH = 4, ++ __XFRMA_SPD_MAX = 5, ++}; ++ ++struct xfrmu_spdinfo { ++ __u32 incnt; ++ __u32 outcnt; ++ __u32 fwdcnt; ++ __u32 inscnt; ++ __u32 outscnt; ++ __u32 fwdscnt; ++}; ++ ++struct xfrmu_spdhinfo { ++ __u32 spdhcnt; ++ __u32 spdhmcnt; ++}; ++ ++struct xfrmu_spdhthresh { ++ __u8 lbits; ++ __u8 rbits; ++}; ++ ++struct xfrm_usersa_info { ++ struct xfrm_selector sel; ++ struct xfrm_id id; ++ xfrm_address_t saddr; ++ struct xfrm_lifetime_cfg lft; ++ struct xfrm_lifetime_cur curlft; ++ struct xfrm_stats stats; ++ __u32 seq; ++ __u32 reqid; ++ __u16 family; ++ __u8 mode; ++ __u8 replay_window; ++ __u8 flags; ++}; ++ ++struct xfrm_usersa_id { ++ xfrm_address_t daddr; ++ __be32 spi; ++ __u16 family; ++ __u8 proto; ++}; ++ ++struct xfrm_aevent_id { ++ 
struct xfrm_usersa_id sa_id; ++ xfrm_address_t saddr; ++ __u32 flags; ++ __u32 reqid; ++}; ++ ++struct xfrm_userspi_info { ++ struct xfrm_usersa_info info; ++ __u32 min; ++ __u32 max; ++}; ++ ++struct xfrm_userpolicy_info { ++ struct xfrm_selector sel; ++ struct xfrm_lifetime_cfg lft; ++ struct xfrm_lifetime_cur curlft; ++ __u32 priority; ++ __u32 index; ++ __u8 dir; ++ __u8 action; ++ __u8 flags; ++ __u8 share; ++}; ++ ++struct xfrm_userpolicy_id { ++ struct xfrm_selector sel; ++ __u32 index; ++ __u8 dir; ++}; ++ ++struct xfrm_user_acquire { ++ struct xfrm_id id; ++ xfrm_address_t saddr; ++ struct xfrm_selector sel; ++ struct xfrm_userpolicy_info policy; ++ __u32 aalgos; ++ __u32 ealgos; ++ __u32 calgos; ++ __u32 seq; ++}; ++ ++struct xfrm_user_expire { ++ struct xfrm_usersa_info state; ++ __u8 hard; ++}; ++ ++struct xfrm_user_polexpire { ++ struct xfrm_userpolicy_info pol; ++ __u8 hard; ++}; ++ ++struct xfrm_usersa_flush { ++ __u8 proto; ++}; ++ ++struct xfrm_user_report { ++ __u8 proto; ++ struct xfrm_selector sel; ++}; ++ ++struct xfrm_user_kmaddress { ++ xfrm_address_t local; ++ xfrm_address_t remote; ++ __u32 reserved; ++ __u16 family; ++}; ++ ++struct xfrm_user_migrate { ++ xfrm_address_t old_daddr; ++ xfrm_address_t old_saddr; ++ xfrm_address_t new_daddr; ++ xfrm_address_t new_saddr; ++ __u8 proto; ++ __u8 mode; ++ __u16 reserved; ++ __u32 reqid; ++ __u16 old_family; ++ __u16 new_family; ++}; ++ ++struct xfrm_user_mapping { ++ struct xfrm_usersa_id id; ++ __u32 reqid; ++ xfrm_address_t old_saddr; ++ xfrm_address_t new_saddr; ++ __be16 old_sport; ++ __be16 new_sport; ++}; ++ ++struct xfrm_dump_info { ++ struct sk_buff *in_skb; ++ struct sk_buff *out_skb; ++ u32 nlmsg_seq; ++ u16 nlmsg_flags; ++}; ++ ++struct xfrm_link { ++ int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); ++ int (*start)(struct netlink_callback *); ++ int (*dump)(struct sk_buff *, struct netlink_callback *); ++ int (*done)(struct netlink_callback *); ++ const struct nla_policy *nla_pol; ++ int nla_max; ++}; ++ ++struct unix_skb_parms { ++ struct pid *pid; ++ kuid_t uid; ++ kgid_t gid; ++ struct scm_fp_list *fp; ++ u32 secid; ++ u32 consumed; ++}; ++ ++struct unix_stream_read_state { ++ int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); ++ struct socket *socket; ++ struct msghdr *msg; ++ struct pipe_inode_info *pipe; ++ size_t size; ++ int flags; ++ unsigned int splice_flags; ++}; ++ ++struct ac6_iter_state { ++ struct seq_net_private p; ++ struct net_device *dev; ++ struct inet6_dev *idev; ++}; ++ ++struct ipcm6_cookie { ++ struct sockcm_cookie sockc; ++ __s16 hlimit; ++ __s16 tclass; ++ __s8 dontfrag; ++ struct ipv6_txoptions *opt; ++ __u16 gso_size; ++}; ++ ++enum { ++ IFLA_INET6_UNSPEC = 0, ++ IFLA_INET6_FLAGS = 1, ++ IFLA_INET6_CONF = 2, ++ IFLA_INET6_STATS = 3, ++ IFLA_INET6_MCAST = 4, ++ IFLA_INET6_CACHEINFO = 5, ++ IFLA_INET6_ICMP6STATS = 6, ++ IFLA_INET6_TOKEN = 7, ++ IFLA_INET6_ADDR_GEN_MODE = 8, ++ __IFLA_INET6_MAX = 9, ++}; ++ ++enum in6_addr_gen_mode { ++ IN6_ADDR_GEN_MODE_EUI64 = 0, ++ IN6_ADDR_GEN_MODE_NONE = 1, ++ IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, ++ IN6_ADDR_GEN_MODE_RANDOM = 3, ++}; ++ ++struct ifla_cacheinfo { ++ __u32 max_reasm_len; ++ __u32 tstamp; ++ __u32 reachable_time; ++ __u32 retrans_time; ++}; ++ ++struct wpan_phy; ++ ++struct wpan_dev_header_ops; ++ ++struct wpan_dev { ++ struct wpan_phy *wpan_phy; ++ int iftype; ++ struct list_head list; ++ struct net_device *netdev; ++ const struct wpan_dev_header_ops *header_ops; ++ struct net_device 
*lowpan_dev; ++ u32 identifier; ++ __le16 pan_id; ++ __le16 short_addr; ++ __le64 extended_addr; ++ atomic_t bsn; ++ atomic_t dsn; ++ u8 min_be; ++ u8 max_be; ++ u8 csma_retries; ++ s8 frame_retries; ++ bool lbt; ++ bool promiscuous_mode; ++ bool ackreq; ++}; ++ ++struct prefixmsg { ++ unsigned char prefix_family; ++ unsigned char prefix_pad1; ++ short unsigned int prefix_pad2; ++ int prefix_ifindex; ++ unsigned char prefix_type; ++ unsigned char prefix_len; ++ unsigned char prefix_flags; ++ unsigned char prefix_pad3; ++}; ++ ++enum { ++ PREFIX_UNSPEC = 0, ++ PREFIX_ADDRESS = 1, ++ PREFIX_CACHEINFO = 2, ++ __PREFIX_MAX = 3, ++}; ++ ++struct prefix_cacheinfo { ++ __u32 preferred_time; ++ __u32 valid_time; ++}; ++ ++struct in6_ifreq { ++ struct in6_addr ifr6_addr; ++ __u32 ifr6_prefixlen; ++ int ifr6_ifindex; ++}; ++ ++enum { ++ DEVCONF_FORWARDING = 0, ++ DEVCONF_HOPLIMIT = 1, ++ DEVCONF_MTU6 = 2, ++ DEVCONF_ACCEPT_RA = 3, ++ DEVCONF_ACCEPT_REDIRECTS = 4, ++ DEVCONF_AUTOCONF = 5, ++ DEVCONF_DAD_TRANSMITS = 6, ++ DEVCONF_RTR_SOLICITS = 7, ++ DEVCONF_RTR_SOLICIT_INTERVAL = 8, ++ DEVCONF_RTR_SOLICIT_DELAY = 9, ++ DEVCONF_USE_TEMPADDR = 10, ++ DEVCONF_TEMP_VALID_LFT = 11, ++ DEVCONF_TEMP_PREFERED_LFT = 12, ++ DEVCONF_REGEN_MAX_RETRY = 13, ++ DEVCONF_MAX_DESYNC_FACTOR = 14, ++ DEVCONF_MAX_ADDRESSES = 15, ++ DEVCONF_FORCE_MLD_VERSION = 16, ++ DEVCONF_ACCEPT_RA_DEFRTR = 17, ++ DEVCONF_ACCEPT_RA_PINFO = 18, ++ DEVCONF_ACCEPT_RA_RTR_PREF = 19, ++ DEVCONF_RTR_PROBE_INTERVAL = 20, ++ DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, ++ DEVCONF_PROXY_NDP = 22, ++ DEVCONF_OPTIMISTIC_DAD = 23, ++ DEVCONF_ACCEPT_SOURCE_ROUTE = 24, ++ DEVCONF_MC_FORWARDING = 25, ++ DEVCONF_DISABLE_IPV6 = 26, ++ DEVCONF_ACCEPT_DAD = 27, ++ DEVCONF_FORCE_TLLAO = 28, ++ DEVCONF_NDISC_NOTIFY = 29, ++ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, ++ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, ++ DEVCONF_SUPPRESS_FRAG_NDISC = 32, ++ DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, ++ DEVCONF_USE_OPTIMISTIC = 34, ++ DEVCONF_ACCEPT_RA_MTU = 35, ++ DEVCONF_STABLE_SECRET = 36, ++ DEVCONF_USE_OIF_ADDRS_ONLY = 37, ++ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, ++ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, ++ DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, ++ DEVCONF_DROP_UNSOLICITED_NA = 41, ++ DEVCONF_KEEP_ADDR_ON_DOWN = 42, ++ DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, ++ DEVCONF_SEG6_ENABLED = 44, ++ DEVCONF_SEG6_REQUIRE_HMAC = 45, ++ DEVCONF_ENHANCED_DAD = 46, ++ DEVCONF_ADDR_GEN_MODE = 47, ++ DEVCONF_DISABLE_POLICY = 48, ++ DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, ++ DEVCONF_NDISC_TCLASS = 50, ++ DEVCONF_MAX = 51, ++}; ++ ++enum { ++ INET6_IFADDR_STATE_PREDAD = 0, ++ INET6_IFADDR_STATE_DAD = 1, ++ INET6_IFADDR_STATE_POSTDAD = 2, ++ INET6_IFADDR_STATE_ERRDAD = 3, ++ INET6_IFADDR_STATE_DEAD = 4, ++}; ++ ++enum nl802154_cca_modes { ++ __NL802154_CCA_INVALID = 0, ++ NL802154_CCA_ENERGY = 1, ++ NL802154_CCA_CARRIER = 2, ++ NL802154_CCA_ENERGY_CARRIER = 3, ++ NL802154_CCA_ALOHA = 4, ++ NL802154_CCA_UWB_SHR = 5, ++ NL802154_CCA_UWB_MULTIPLEXED = 6, ++ __NL802154_CCA_ATTR_AFTER_LAST = 7, ++ NL802154_CCA_ATTR_MAX = 6, ++}; ++ ++enum nl802154_cca_opts { ++ NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0, ++ NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1, ++ __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2, ++ NL802154_CCA_OPT_ATTR_MAX = 1, ++}; ++ ++enum nl802154_supported_bool_states { ++ NL802154_SUPPORTED_BOOL_FALSE = 0, ++ NL802154_SUPPORTED_BOOL_TRUE = 1, ++ __NL802154_SUPPORTED_BOOL_INVALD = 2, ++ NL802154_SUPPORTED_BOOL_BOTH = 3, ++ __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4, ++ 
NL802154_SUPPORTED_BOOL_MAX = 3, ++}; ++ ++struct wpan_phy_supported { ++ u32 channels[32]; ++ u32 cca_modes; ++ u32 cca_opts; ++ u32 iftypes; ++ enum nl802154_supported_bool_states lbt; ++ u8 min_minbe; ++ u8 max_minbe; ++ u8 min_maxbe; ++ u8 max_maxbe; ++ u8 min_csma_backoffs; ++ u8 max_csma_backoffs; ++ s8 min_frame_retries; ++ s8 max_frame_retries; ++ size_t tx_powers_size; ++ size_t cca_ed_levels_size; ++ const s32 *tx_powers; ++ const s32 *cca_ed_levels; ++}; ++ ++struct wpan_phy_cca { ++ enum nl802154_cca_modes mode; ++ enum nl802154_cca_opts opt; ++}; ++ ++struct wpan_phy { ++ const void *privid; ++ u32 flags; ++ u8 current_channel; ++ u8 current_page; ++ struct wpan_phy_supported supported; ++ s32 transmit_power; ++ struct wpan_phy_cca cca; ++ __le64 perm_extended_addr; ++ s32 cca_ed_level; ++ u8 symbol_duration; ++ u16 lifs_period; ++ u16 sifs_period; ++ struct device dev; ++ possible_net_t _net; ++ long: 64; ++ long: 64; ++ char priv[0]; ++}; ++ ++struct ieee802154_addr { ++ u8 mode; ++ __le16 pan_id; ++ union { ++ __le16 short_addr; ++ __le64 extended_addr; ++ }; ++}; ++ ++struct wpan_dev_header_ops { ++ int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int); ++}; ++ ++union fwnet_hwaddr { ++ u8 u[16]; ++ struct { ++ __be64 uniq_id; ++ u8 max_rec; ++ u8 sspd; ++ __be16 fifo_hi; ++ __be32 fifo_lo; ++ } uc; ++}; ++ ++struct in6_validator_info { ++ struct in6_addr i6vi_addr; ++ struct inet6_dev *i6vi_dev; ++ struct netlink_ext_ack *extack; ++}; ++ ++struct ifa6_config { ++ const struct in6_addr *pfx; ++ unsigned int plen; ++ const struct in6_addr *peer_pfx; ++ u32 rt_priority; ++ u32 ifa_flags; ++ u32 preferred_lft; ++ u32 valid_lft; ++ u16 scope; ++}; ++ ++struct fib6_config { ++ u32 fc_table; ++ u32 fc_metric; ++ int fc_dst_len; ++ int fc_src_len; ++ int fc_ifindex; ++ u32 fc_flags; ++ u32 fc_protocol; ++ u16 fc_type; ++ u16 fc_delete_all_nh: 1; ++ u16 __unused: 15; ++ struct in6_addr fc_dst; ++ struct in6_addr fc_src; ++ struct in6_addr fc_prefsrc; ++ struct in6_addr fc_gateway; ++ long unsigned int fc_expires; ++ struct nlattr *fc_mx; ++ int fc_mx_len; ++ int fc_mp_len; ++ struct nlattr *fc_mp; ++ struct nl_info fc_nlinfo; ++ struct nlattr *fc_encap; ++ u16 fc_encap_type; ++}; ++ ++enum cleanup_prefix_rt_t { ++ CLEANUP_PREFIX_RT_NOP = 0, ++ CLEANUP_PREFIX_RT_DEL = 1, ++ CLEANUP_PREFIX_RT_EXPIRE = 2, ++}; ++ ++enum { ++ IPV6_SADDR_RULE_INIT = 0, ++ IPV6_SADDR_RULE_LOCAL = 1, ++ IPV6_SADDR_RULE_SCOPE = 2, ++ IPV6_SADDR_RULE_PREFERRED = 3, ++ IPV6_SADDR_RULE_OIF = 4, ++ IPV6_SADDR_RULE_LABEL = 5, ++ IPV6_SADDR_RULE_PRIVACY = 6, ++ IPV6_SADDR_RULE_ORCHID = 7, ++ IPV6_SADDR_RULE_PREFIX = 8, ++ IPV6_SADDR_RULE_NOT_OPTIMISTIC = 9, ++ IPV6_SADDR_RULE_MAX = 10, ++}; ++ ++struct ipv6_saddr_score { ++ int rule; ++ int addr_type; ++ struct inet6_ifaddr *ifa; ++ long unsigned int scorebits[1]; ++ int scopedist; ++ int matchlen; ++}; ++ ++struct ipv6_saddr_dst { ++ const struct in6_addr *addr; ++ int ifindex; ++ int scope; ++ int label; ++ unsigned int prefs; ++}; ++ ++struct if6_iter_state { ++ struct seq_net_private p; ++ int bucket; ++ int offset; ++}; ++ ++enum addr_type_t { ++ UNICAST_ADDR = 0, ++ MULTICAST_ADDR = 1, ++ ANYCAST_ADDR = 2, ++}; ++ ++enum { ++ DAD_PROCESS = 0, ++ DAD_BEGIN = 1, ++ DAD_ABORT = 2, ++}; ++ ++struct ifaddrlblmsg { ++ __u8 ifal_family; ++ __u8 __ifal_reserved; ++ __u8 ifal_prefixlen; ++ __u8 ifal_flags; ++ __u32 ifal_index; ++ __u32 ifal_seq; ++}; ++ ++enum { ++ IFAL_ADDRESS = 1, ++ 
IFAL_LABEL = 2, ++ __IFAL_MAX = 3, ++}; ++ ++struct ip6addrlbl_entry { ++ struct in6_addr prefix; ++ int prefixlen; ++ int ifindex; ++ int addrtype; ++ u32 label; ++ struct hlist_node list; ++ struct callback_head rcu; ++}; ++ ++struct ip6addrlbl_init_table { ++ const struct in6_addr *prefix; ++ int prefixlen; ++ u32 label; ++}; ++ ++struct rd_msg { ++ struct icmp6hdr icmph; ++ struct in6_addr target; ++ struct in6_addr dest; ++ __u8 opt[0]; ++}; ++ ++struct fib6_gc_args { ++ int timeout; ++ int more; ++}; ++ ++struct rt6_exception { ++ struct hlist_node hlist; ++ struct rt6_info *rt6i; ++ long unsigned int stamp; ++ struct callback_head rcu; ++}; ++ ++struct route_info { ++ __u8 type; ++ __u8 length; ++ __u8 prefix_len; ++ __u8 reserved_l: 3; ++ __u8 route_pref: 2; ++ __u8 reserved_h: 3; ++ __be32 lifetime; ++ __u8 prefix[0]; ++}; ++ ++struct rt6_rtnl_dump_arg { ++ struct sk_buff *skb; ++ struct netlink_callback *cb; ++ struct net *net; ++}; ++ ++struct netevent_redirect { ++ struct dst_entry *old; ++ struct dst_entry *new; ++ struct neighbour *neigh; ++ const void *daddr; ++}; ++ ++struct trace_event_raw_fib6_table_lookup { ++ struct trace_entry ent; ++ u32 tb_id; ++ int err; ++ int oif; ++ int iif; ++ __u8 tos; ++ __u8 scope; ++ __u8 flags; ++ __u8 src[16]; ++ __u8 dst[16]; ++ u16 sport; ++ u16 dport; ++ u8 proto; ++ u8 rt_type; ++ u32 __data_loc_name; ++ __u8 gw[16]; ++ char __data[0]; ++}; ++ ++struct trace_event_data_offsets_fib6_table_lookup { ++ u32 name; ++}; ++ ++enum rt6_nud_state { ++ RT6_NUD_FAIL_HARD = 4294967293, ++ RT6_NUD_FAIL_PROBE = 4294967294, ++ RT6_NUD_FAIL_DO_RR = 4294967295, ++ RT6_NUD_SUCCEED = 1, ++}; ++ ++struct __rt6_probe_work { ++ struct work_struct work; ++ struct in6_addr target; ++ struct net_device *dev; ++}; ++ ++struct ip6rd_flowi { ++ struct flowi6 fl6; ++ struct in6_addr gateway; ++}; ++ ++struct arg_dev_net_ip { ++ struct net_device *dev; ++ struct net *net; ++ struct in6_addr *addr; ++}; ++ ++struct arg_netdev_event { ++ const struct net_device *dev; ++ union { ++ unsigned int nh_flags; ++ long unsigned int event; ++ }; ++}; ++ ++struct rt6_mtu_change_arg { ++ struct net_device *dev; ++ unsigned int mtu; ++}; ++ ++struct rt6_nh { ++ struct fib6_info *fib6_info; ++ struct fib6_config r_cfg; ++ struct list_head next; ++}; ++ ++enum fib6_walk_state { ++ FWS_L = 0, ++ FWS_R = 1, ++ FWS_C = 2, ++ FWS_U = 3, ++}; ++ ++struct fib6_walker { ++ struct list_head lh; ++ struct fib6_node *root; ++ struct fib6_node *node; ++ struct fib6_info *leaf; ++ enum fib6_walk_state state; ++ unsigned int skip; ++ unsigned int count; ++ int (*func)(struct fib6_walker *); ++ void *args; ++}; ++ ++struct fib6_entry_notifier_info { ++ struct fib_notifier_info info; ++ struct fib6_info *rt; ++}; ++ ++struct ipv6_route_iter { ++ struct seq_net_private p; ++ struct fib6_walker w; ++ loff_t skip; ++ struct fib6_table *tbl; ++ int sernum; ++}; ++ ++struct fib6_cleaner { ++ struct fib6_walker w; ++ struct net *net; ++ int (*func)(struct fib6_info *, void *); ++ int sernum; ++ void *arg; ++}; ++ ++enum { ++ FIB6_NO_SERNUM_CHANGE = 0, ++}; ++ ++struct fib6_dump_arg { ++ struct net *net; ++ struct notifier_block *nb; ++}; ++ ++struct lookup_args { ++ int offset; ++ const struct in6_addr *addr; ++}; ++ ++struct ipv6_mreq { ++ struct in6_addr ipv6mr_multiaddr; ++ int ipv6mr_ifindex; ++}; ++ ++struct in6_flowlabel_req { ++ struct in6_addr flr_dst; ++ __be32 flr_label; ++ __u8 flr_action; ++ __u8 flr_share; ++ __u16 flr_flags; ++ __u16 flr_expires; ++ __u16 flr_linger; ++ __u32 __flr_pad; 
++}; ++ ++struct ip6_mtuinfo { ++ struct sockaddr_in6 ip6m_addr; ++ __u32 ip6m_mtu; ++}; ++ ++struct nduseroptmsg { ++ unsigned char nduseropt_family; ++ unsigned char nduseropt_pad1; ++ short unsigned int nduseropt_opts_len; ++ int nduseropt_ifindex; ++ __u8 nduseropt_icmp_type; ++ __u8 nduseropt_icmp_code; ++ short unsigned int nduseropt_pad2; ++ unsigned int nduseropt_pad3; ++}; ++ ++enum { ++ NDUSEROPT_UNSPEC = 0, ++ NDUSEROPT_SRCADDR = 1, ++ __NDUSEROPT_MAX = 2, ++}; ++ ++struct nd_msg { ++ struct icmp6hdr icmph; ++ struct in6_addr target; ++ __u8 opt[0]; ++}; ++ ++struct rs_msg { ++ struct icmp6hdr icmph; ++ __u8 opt[0]; ++}; ++ ++struct ra_msg { ++ struct icmp6hdr icmph; ++ __be32 reachable_time; ++ __be32 retrans_timer; ++}; ++ ++struct icmp6_filter { ++ __u32 data[8]; ++}; ++ ++struct raw6_sock { ++ struct inet_sock inet; ++ __u32 checksum; ++ __u32 offset; ++ struct icmp6_filter filter; ++ __u32 ip6mr_table; ++ struct ipv6_pinfo inet6; ++}; ++ ++typedef int mh_filter_t(struct sock *, struct sk_buff *); ++ ++struct raw6_frag_vec { ++ struct msghdr *msg; ++ int hlen; ++ char c[4]; ++}; ++ ++struct ipv6_destopt_hao { ++ __u8 type; ++ __u8 length; ++ struct in6_addr addr; ++} __attribute__((packed)); ++ ++struct icmpv6_msg { ++ struct sk_buff *skb; ++ int offset; ++ uint8_t type; ++}; ++ ++struct icmp6_err { ++ int err; ++ int fatal; ++}; ++ ++struct mld_msg { ++ struct icmp6hdr mld_hdr; ++ struct in6_addr mld_mca; ++}; ++ ++struct mld2_grec { ++ __u8 grec_type; ++ __u8 grec_auxwords; ++ __be16 grec_nsrcs; ++ struct in6_addr grec_mca; ++ struct in6_addr grec_src[0]; ++}; ++ ++struct mld2_report { ++ struct icmp6hdr mld2r_hdr; ++ struct mld2_grec mld2r_grec[0]; ++}; ++ ++struct mld2_query { ++ struct icmp6hdr mld2q_hdr; ++ struct in6_addr mld2q_mca; ++ __u8 mld2q_qrv: 3; ++ __u8 mld2q_suppress: 1; ++ __u8 mld2q_resv2: 4; ++ __u8 mld2q_qqic; ++ __be16 mld2q_nsrcs; ++ struct in6_addr mld2q_srcs[0]; ++}; ++ ++struct igmp6_mc_iter_state { ++ struct seq_net_private p; ++ struct net_device *dev; ++ struct inet6_dev *idev; ++}; ++ ++struct igmp6_mcf_iter_state { ++ struct seq_net_private p; ++ struct net_device *dev; ++ struct inet6_dev *idev; ++ struct ifmcaddr6 *im; ++}; ++ ++enum ip6_defrag_users { ++ IP6_DEFRAG_LOCAL_DELIVER = 0, ++ IP6_DEFRAG_CONNTRACK_IN = 1, ++ __IP6_DEFRAG_CONNTRACK_IN = 65536, ++ IP6_DEFRAG_CONNTRACK_OUT = 65537, ++ __IP6_DEFRAG_CONNTRACK_OUT = 131072, ++ IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, ++ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, ++}; ++ ++struct frag_queue { ++ struct inet_frag_queue q; ++ int iif; ++ __u16 nhoffset; ++ u8 ecn; ++}; ++ ++struct tcp6_sock { ++ struct tcp_sock tcp; ++ struct ipv6_pinfo inet6; ++}; ++ ++struct tcp6_pseudohdr { ++ struct in6_addr saddr; ++ struct in6_addr daddr; ++ __be32 len; ++ __be32 protocol; ++}; ++ ++struct rt0_hdr { ++ struct ipv6_rt_hdr rt_hdr; ++ __u32 reserved; ++ struct in6_addr addr[0]; ++}; ++ ++struct tlvtype_proc { ++ int type; ++ bool (*func)(struct sk_buff *, int); ++}; ++ ++struct ip6fl_iter_state { ++ struct seq_net_private p; ++ struct pid_namespace *pid_ns; ++ int bucket; ++}; ++ ++struct sr6_tlv { ++ __u8 type; ++ __u8 len; ++ __u8 data[0]; ++}; ++ ++enum { ++ SEG6_ATTR_UNSPEC = 0, ++ SEG6_ATTR_DST = 1, ++ SEG6_ATTR_DSTLEN = 2, ++ SEG6_ATTR_HMACKEYID = 3, ++ SEG6_ATTR_SECRET = 4, ++ SEG6_ATTR_SECRETLEN = 5, ++ SEG6_ATTR_ALGID = 6, ++ SEG6_ATTR_HMACINFO = 7, ++ __SEG6_ATTR_MAX = 8, ++}; ++ ++enum { ++ SEG6_CMD_UNSPEC = 0, ++ SEG6_CMD_SETHMAC = 1, ++ SEG6_CMD_DUMPHMAC = 2, ++ SEG6_CMD_SET_TUNSRC = 3, ++ 
SEG6_CMD_GET_TUNSRC = 4, ++ __SEG6_CMD_MAX = 5, ++}; ++ ++typedef short unsigned int mifi_t; ++ ++typedef __u32 if_mask; ++ ++struct if_set { ++ if_mask ifs_bits[8]; ++}; ++ ++struct mif6ctl { ++ mifi_t mif6c_mifi; ++ unsigned char mif6c_flags; ++ unsigned char vifc_threshold; ++ __u16 mif6c_pifi; ++ unsigned int vifc_rate_limit; ++}; ++ ++struct mf6cctl { ++ struct sockaddr_in6 mf6cc_origin; ++ struct sockaddr_in6 mf6cc_mcastgrp; ++ mifi_t mf6cc_parent; ++ struct if_set mf6cc_ifset; ++}; ++ ++struct sioc_sg_req6 { ++ struct sockaddr_in6 src; ++ struct sockaddr_in6 grp; ++ long unsigned int pktcnt; ++ long unsigned int bytecnt; ++ long unsigned int wrong_if; ++}; ++ ++struct sioc_mif_req6 { ++ mifi_t mifi; ++ long unsigned int icount; ++ long unsigned int ocount; ++ long unsigned int ibytes; ++ long unsigned int obytes; ++}; ++ ++struct mrt6msg { ++ __u8 im6_mbz; ++ __u8 im6_msgtype; ++ __u16 im6_mif; ++ __u32 im6_pad; ++ struct in6_addr im6_src; ++ struct in6_addr im6_dst; ++}; ++ ++enum { ++ IP6MRA_CREPORT_UNSPEC = 0, ++ IP6MRA_CREPORT_MSGTYPE = 1, ++ IP6MRA_CREPORT_MIF_ID = 2, ++ IP6MRA_CREPORT_SRC_ADDR = 3, ++ IP6MRA_CREPORT_DST_ADDR = 4, ++ IP6MRA_CREPORT_PKT = 5, ++ __IP6MRA_CREPORT_MAX = 6, ++}; ++ ++struct mfc6_cache_cmp_arg { ++ struct in6_addr mf6c_mcastgrp; ++ struct in6_addr mf6c_origin; ++}; ++ ++struct mfc6_cache { ++ struct mr_mfc _c; ++ union { ++ struct { ++ struct in6_addr mf6c_mcastgrp; ++ struct in6_addr mf6c_origin; ++ }; ++ struct mfc6_cache_cmp_arg cmparg; ++ }; ++}; ++ ++struct ip6mr_result { ++ struct mr_table *mrt; ++}; ++ ++struct compat_sioc_sg_req6 { ++ struct sockaddr_in6 src; ++ struct sockaddr_in6 grp; ++ compat_ulong_t pktcnt; ++ compat_ulong_t bytecnt; ++ compat_ulong_t wrong_if; ++}; ++ ++struct compat_sioc_mif_req6 { ++ mifi_t mifi; ++ compat_ulong_t icount; ++ compat_ulong_t ocount; ++ compat_ulong_t ibytes; ++ compat_ulong_t obytes; ++}; ++ ++struct ip6_mh { ++ __u8 ip6mh_proto; ++ __u8 ip6mh_hdrlen; ++ __u8 ip6mh_type; ++ __u8 ip6mh_reserved; ++ __u16 ip6mh_cksum; ++ __u8 data[0]; ++}; ++ ++struct xfrm6_protocol { ++ int (*handler)(struct sk_buff *); ++ int (*cb_handler)(struct sk_buff *, int); ++ int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); ++ struct xfrm6_protocol *next; ++ int priority; ++}; ++ ++typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); ++ ++struct fib6_rule { ++ struct fib_rule common; ++ struct rt6key src; ++ struct rt6key dst; ++ u8 tclass; ++}; ++ ++struct calipso_doi; ++ ++struct netlbl_calipso_ops { ++ int (*doi_add)(struct calipso_doi *, struct netlbl_audit *); ++ void (*doi_free)(struct calipso_doi *); ++ int (*doi_remove)(u32, struct netlbl_audit *); ++ struct calipso_doi * (*doi_getdef)(u32); ++ void (*doi_putdef)(struct calipso_doi *); ++ int (*doi_walk)(u32 *, int (*)(struct calipso_doi *, void *), void *); ++ int (*sock_getattr)(struct sock *, struct netlbl_lsm_secattr *); ++ int (*sock_setattr)(struct sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); ++ void (*sock_delattr)(struct sock *); ++ int (*req_setattr)(struct request_sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); ++ void (*req_delattr)(struct request_sock *); ++ int (*opt_getattr)(const unsigned char *, struct netlbl_lsm_secattr *); ++ unsigned char * (*skbuff_optptr)(const struct sk_buff *); ++ int (*skbuff_setattr)(struct sk_buff *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); ++ int 
(*skbuff_delattr)(struct sk_buff *); ++ void (*cache_invalidate)(); ++ int (*cache_add)(const unsigned char *, const struct netlbl_lsm_secattr *); ++}; ++ ++struct calipso_doi { ++ u32 doi; ++ u32 type; ++ refcount_t refcount; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct calipso_map_cache_bkt { ++ spinlock_t lock; ++ u32 size; ++ struct list_head list; ++}; ++ ++struct calipso_map_cache_entry { ++ u32 hash; ++ unsigned char *key; ++ size_t key_len; ++ struct netlbl_lsm_cache *lsm_data; ++ u32 activity; ++ struct list_head list; ++}; ++ ++typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *); ++ ++struct sockaddr_pkt { ++ short unsigned int spkt_family; ++ unsigned char spkt_device[14]; ++ __be16 spkt_protocol; ++}; ++ ++struct sockaddr_ll { ++ short unsigned int sll_family; ++ __be16 sll_protocol; ++ int sll_ifindex; ++ short unsigned int sll_hatype; ++ unsigned char sll_pkttype; ++ unsigned char sll_halen; ++ unsigned char sll_addr[8]; ++}; ++ ++struct tpacket_stats { ++ unsigned int tp_packets; ++ unsigned int tp_drops; ++}; ++ ++struct tpacket_stats_v3 { ++ unsigned int tp_packets; ++ unsigned int tp_drops; ++ unsigned int tp_freeze_q_cnt; ++}; ++ ++struct tpacket_rollover_stats { ++ __u64 tp_all; ++ __u64 tp_huge; ++ __u64 tp_failed; ++}; ++ ++union tpacket_stats_u { ++ struct tpacket_stats stats1; ++ struct tpacket_stats_v3 stats3; ++}; ++ ++struct tpacket_auxdata { ++ __u32 tp_status; ++ __u32 tp_len; ++ __u32 tp_snaplen; ++ __u16 tp_mac; ++ __u16 tp_net; ++ __u16 tp_vlan_tci; ++ __u16 tp_vlan_tpid; ++}; ++ ++struct tpacket_hdr { ++ long unsigned int tp_status; ++ unsigned int tp_len; ++ unsigned int tp_snaplen; ++ short unsigned int tp_mac; ++ short unsigned int tp_net; ++ unsigned int tp_sec; ++ unsigned int tp_usec; ++}; ++ ++struct tpacket2_hdr { ++ __u32 tp_status; ++ __u32 tp_len; ++ __u32 tp_snaplen; ++ __u16 tp_mac; ++ __u16 tp_net; ++ __u32 tp_sec; ++ __u32 tp_nsec; ++ __u16 tp_vlan_tci; ++ __u16 tp_vlan_tpid; ++ __u8 tp_padding[4]; ++}; ++ ++struct tpacket_hdr_variant1 { ++ __u32 tp_rxhash; ++ __u32 tp_vlan_tci; ++ __u16 tp_vlan_tpid; ++ __u16 tp_padding; ++}; ++ ++struct tpacket3_hdr { ++ __u32 tp_next_offset; ++ __u32 tp_sec; ++ __u32 tp_nsec; ++ __u32 tp_snaplen; ++ __u32 tp_len; ++ __u32 tp_status; ++ __u16 tp_mac; ++ __u16 tp_net; ++ union { ++ struct tpacket_hdr_variant1 hv1; ++ }; ++ __u8 tp_padding[8]; ++}; ++ ++struct tpacket_bd_ts { ++ unsigned int ts_sec; ++ union { ++ unsigned int ts_usec; ++ unsigned int ts_nsec; ++ }; ++}; ++ ++struct tpacket_hdr_v1 { ++ __u32 block_status; ++ __u32 num_pkts; ++ __u32 offset_to_first_pkt; ++ __u32 blk_len; ++ __u64 seq_num; ++ struct tpacket_bd_ts ts_first_pkt; ++ struct tpacket_bd_ts ts_last_pkt; ++}; ++ ++union tpacket_bd_header_u { ++ struct tpacket_hdr_v1 bh1; ++}; ++ ++struct tpacket_block_desc { ++ __u32 version; ++ __u32 offset_to_priv; ++ union tpacket_bd_header_u hdr; ++}; ++ ++enum tpacket_versions { ++ TPACKET_V1 = 0, ++ TPACKET_V2 = 1, ++ TPACKET_V3 = 2, ++}; ++ ++struct tpacket_req { ++ unsigned int tp_block_size; ++ unsigned int tp_block_nr; ++ unsigned int tp_frame_size; ++ unsigned int tp_frame_nr; ++}; ++ ++struct tpacket_req3 { ++ unsigned int tp_block_size; ++ unsigned int tp_block_nr; ++ unsigned int tp_frame_size; ++ unsigned int tp_frame_nr; ++ unsigned int tp_retire_blk_tov; ++ unsigned int tp_sizeof_priv; ++ unsigned int tp_feature_req_word; ++}; ++ ++union tpacket_req_u { ++ struct tpacket_req req; ++ struct tpacket_req3 req3; ++}; ++ ++struct 
virtio_net_hdr { ++ __u8 flags; ++ __u8 gso_type; ++ __virtio16 hdr_len; ++ __virtio16 gso_size; ++ __virtio16 csum_start; ++ __virtio16 csum_offset; ++}; ++ ++struct packet_mclist { ++ struct packet_mclist *next; ++ int ifindex; ++ int count; ++ short unsigned int type; ++ short unsigned int alen; ++ unsigned char addr[32]; ++}; ++ ++struct pgv; ++ ++struct tpacket_kbdq_core { ++ struct pgv *pkbdq; ++ unsigned int feature_req_word; ++ unsigned int hdrlen; ++ unsigned char reset_pending_on_curr_blk; ++ unsigned char delete_blk_timer; ++ short unsigned int kactive_blk_num; ++ short unsigned int blk_sizeof_priv; ++ short unsigned int last_kactive_blk_num; ++ char *pkblk_start; ++ char *pkblk_end; ++ int kblk_size; ++ unsigned int max_frame_len; ++ unsigned int knum_blocks; ++ uint64_t knxt_seq_num; ++ char *prev; ++ char *nxt_offset; ++ struct sk_buff *skb; ++ atomic_t blk_fill_in_prog; ++ short unsigned int retire_blk_tov; ++ short unsigned int version; ++ long unsigned int tov_in_jiffies; ++ struct timer_list retire_blk_timer; ++}; ++ ++struct pgv { ++ char *buffer; ++}; ++ ++struct packet_ring_buffer { ++ struct pgv *pg_vec; ++ unsigned int head; ++ unsigned int frames_per_block; ++ unsigned int frame_size; ++ unsigned int frame_max; ++ unsigned int pg_vec_order; ++ unsigned int pg_vec_pages; ++ unsigned int pg_vec_len; ++ unsigned int *pending_refcnt; ++ union { ++ long unsigned int *rx_owner_map; ++ struct tpacket_kbdq_core prb_bdqc; ++ }; ++}; ++ ++struct packet_fanout { ++ possible_net_t net; ++ unsigned int num_members; ++ u16 id; ++ u8 type; ++ u8 flags; ++ union { ++ atomic_t rr_cur; ++ struct bpf_prog *bpf_prog; ++ }; ++ struct list_head list; ++ struct sock *arr[256]; ++ spinlock_t lock; ++ refcount_t sk_ref; ++ long: 64; ++ long: 64; ++ struct packet_type prot_hook; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct packet_rollover { ++ int sock; ++ atomic_long_t num; ++ atomic_long_t num_huge; ++ atomic_long_t num_failed; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ u32 history[16]; ++}; ++ ++struct packet_sock { ++ struct sock sk; ++ struct packet_fanout *fanout; ++ union tpacket_stats_u stats; ++ struct packet_ring_buffer rx_ring; ++ struct packet_ring_buffer tx_ring; ++ int copy_thresh; ++ spinlock_t bind_lock; ++ struct mutex pg_vec_lock; ++ unsigned int running; ++ unsigned int auxdata: 1; ++ unsigned int origdev: 1; ++ unsigned int has_vnet_hdr: 1; ++ unsigned int tp_loss: 1; ++ unsigned int tp_tx_has_off: 1; ++ int pressure; ++ int ifindex; ++ __be16 num; ++ struct packet_rollover *rollover; ++ struct packet_mclist *mclist; ++ atomic_t mapped; ++ enum tpacket_versions tp_version; ++ unsigned int tp_hdrlen; ++ unsigned int tp_reserve; ++ unsigned int tp_tstamp; ++ struct completion skb_completion; ++ struct net_device *cached_dev; ++ int (*xmit)(struct sk_buff *); ++ long: 64; ++ long: 64; ++ long: 64; ++ struct packet_type prot_hook; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ atomic_t tp_drops; ++ long: 32; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++ long: 64; ++}; ++ ++struct packet_mreq_max { ++ int mr_ifindex; ++ short unsigned int mr_type; ++ short unsigned int mr_alen; ++ unsigned char mr_address[32]; ++}; ++ ++union tpacket_uhdr { ++ struct tpacket_hdr *h1; ++ struct tpacket2_hdr *h2; ++ struct tpacket3_hdr *h3; ++ void *raw; ++}; ++ ++struct packet_skb_cb { ++ union { ++ struct sockaddr_pkt pkt; ++ union { ++ unsigned int origlen; ++ struct sockaddr_ll ll; ++ }; ++ } sa; ++}; ++ ++struct 
vlan_group { ++ unsigned int nr_vlan_devs; ++ struct hlist_node hlist; ++ struct net_device **vlan_devices_arrays[16]; ++}; ++ ++struct vlan_info { ++ struct net_device *real_dev; ++ struct vlan_group grp; ++ struct list_head vid_list; ++ unsigned int nr_vids; ++ struct callback_head rcu; ++}; ++ ++enum vlan_flags { ++ VLAN_FLAG_REORDER_HDR = 1, ++ VLAN_FLAG_GVRP = 2, ++ VLAN_FLAG_LOOSE_BINDING = 4, ++ VLAN_FLAG_MVRP = 8, ++}; ++ ++struct vlan_priority_tci_mapping { ++ u32 priority; ++ u16 vlan_qos; ++ struct vlan_priority_tci_mapping *next; ++}; ++ ++struct vlan_dev_priv { ++ unsigned int nr_ingress_mappings; ++ u32 ingress_priority_map[8]; ++ unsigned int nr_egress_mappings; ++ struct vlan_priority_tci_mapping *egress_priority_map[16]; ++ __be16 vlan_proto; ++ u16 vlan_id; ++ u16 flags; ++ struct net_device *real_dev; ++ unsigned char real_dev_addr[6]; ++ struct proc_dir_entry *dent; ++ struct vlan_pcpu_stats *vlan_pcpu_stats; ++ struct netpoll *netpoll; ++ unsigned int nest_level; ++}; ++ ++enum vlan_protos { ++ VLAN_PROTO_8021Q = 0, ++ VLAN_PROTO_8021AD = 1, ++ VLAN_PROTO_NUM = 2, ++}; ++ ++struct vlan_vid_info { ++ struct list_head list; ++ __be16 proto; ++ u16 vid; ++ int refcount; ++}; ++ ++enum nl80211_iftype { ++ NL80211_IFTYPE_UNSPECIFIED = 0, ++ NL80211_IFTYPE_ADHOC = 1, ++ NL80211_IFTYPE_STATION = 2, ++ NL80211_IFTYPE_AP = 3, ++ NL80211_IFTYPE_AP_VLAN = 4, ++ NL80211_IFTYPE_WDS = 5, ++ NL80211_IFTYPE_MONITOR = 6, ++ NL80211_IFTYPE_MESH_POINT = 7, ++ NL80211_IFTYPE_P2P_CLIENT = 8, ++ NL80211_IFTYPE_P2P_GO = 9, ++ NL80211_IFTYPE_P2P_DEVICE = 10, ++ NL80211_IFTYPE_OCB = 11, ++ NL80211_IFTYPE_NAN = 12, ++ NUM_NL80211_IFTYPES = 13, ++ NL80211_IFTYPE_MAX = 12, ++}; ++ ++struct cfg80211_conn; ++ ++struct cfg80211_cached_keys; ++ ++enum ieee80211_bss_type { ++ IEEE80211_BSS_TYPE_ESS = 0, ++ IEEE80211_BSS_TYPE_PBSS = 1, ++ IEEE80211_BSS_TYPE_IBSS = 2, ++ IEEE80211_BSS_TYPE_MBSS = 3, ++ IEEE80211_BSS_TYPE_ANY = 4, ++}; ++ ++struct cfg80211_internal_bss; ++ ++enum nl80211_chan_width { ++ NL80211_CHAN_WIDTH_20_NOHT = 0, ++ NL80211_CHAN_WIDTH_20 = 1, ++ NL80211_CHAN_WIDTH_40 = 2, ++ NL80211_CHAN_WIDTH_80 = 3, ++ NL80211_CHAN_WIDTH_80P80 = 4, ++ NL80211_CHAN_WIDTH_160 = 5, ++ NL80211_CHAN_WIDTH_5 = 6, ++ NL80211_CHAN_WIDTH_10 = 7, ++}; ++ ++struct ieee80211_channel; ++ ++struct cfg80211_chan_def { ++ struct ieee80211_channel *chan; ++ enum nl80211_chan_width width; ++ u32 center_freq1; ++ u32 center_freq2; ++}; ++ ++struct ieee80211_mcs_info { ++ u8 rx_mask[10]; ++ __le16 rx_highest; ++ u8 tx_params; ++ u8 reserved[3]; ++}; ++ ++struct ieee80211_ht_cap { ++ __le16 cap_info; ++ u8 ampdu_params_info; ++ struct ieee80211_mcs_info mcs; ++ __le16 extended_ht_cap_info; ++ __le32 tx_BF_cap_info; ++ u8 antenna_selection_info; ++} __attribute__((packed)); ++ ++struct key_params; ++ ++struct cfg80211_ibss_params { ++ const u8 *ssid; ++ const u8 *bssid; ++ struct cfg80211_chan_def chandef; ++ const u8 *ie; ++ u8 ssid_len; ++ u8 ie_len; ++ u16 beacon_interval; ++ u32 basic_rates; ++ bool channel_fixed; ++ bool privacy; ++ bool control_port; ++ bool control_port_over_nl80211; ++ bool userspace_handles_dfs; ++ int: 24; ++ int mcast_rate[3]; ++ struct ieee80211_ht_cap ht_capa; ++ struct ieee80211_ht_cap ht_capa_mask; ++ struct key_params *wep_keys; ++ int wep_tx_key; ++ int: 32; ++} __attribute__((packed)); ++ ++enum nl80211_auth_type { ++ NL80211_AUTHTYPE_OPEN_SYSTEM = 0, ++ NL80211_AUTHTYPE_SHARED_KEY = 1, ++ NL80211_AUTHTYPE_FT = 2, ++ NL80211_AUTHTYPE_NETWORK_EAP = 3, ++ NL80211_AUTHTYPE_SAE = 4, ++ 
NL80211_AUTHTYPE_FILS_SK = 5, ++ NL80211_AUTHTYPE_FILS_SK_PFS = 6, ++ NL80211_AUTHTYPE_FILS_PK = 7, ++ __NL80211_AUTHTYPE_NUM = 8, ++ NL80211_AUTHTYPE_MAX = 7, ++ NL80211_AUTHTYPE_AUTOMATIC = 8, ++}; ++ ++enum nl80211_mfp { ++ NL80211_MFP_NO = 0, ++ NL80211_MFP_REQUIRED = 1, ++ NL80211_MFP_OPTIONAL = 2, ++}; ++ ++struct cfg80211_crypto_settings { ++ u32 wpa_versions; ++ u32 cipher_group; ++ int n_ciphers_pairwise; ++ u32 ciphers_pairwise[5]; ++ int n_akm_suites; ++ u32 akm_suites[2]; ++ bool control_port; ++ __be16 control_port_ethertype; ++ bool control_port_no_encrypt; ++ bool control_port_over_nl80211; ++ struct key_params *wep_keys; ++ int wep_tx_key; ++ const u8 *psk; ++}; ++ ++struct ieee80211_vht_mcs_info { ++ __le16 rx_mcs_map; ++ __le16 rx_highest; ++ __le16 tx_mcs_map; ++ __le16 tx_highest; ++}; ++ ++struct ieee80211_vht_cap { ++ __le32 vht_cap_info; ++ struct ieee80211_vht_mcs_info supp_mcs; ++}; ++ ++enum nl80211_bss_select_attr { ++ __NL80211_BSS_SELECT_ATTR_INVALID = 0, ++ NL80211_BSS_SELECT_ATTR_RSSI = 1, ++ NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, ++ NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, ++ __NL80211_BSS_SELECT_ATTR_AFTER_LAST = 4, ++ NL80211_BSS_SELECT_ATTR_MAX = 3, ++}; ++ ++enum nl80211_band { ++ NL80211_BAND_2GHZ = 0, ++ NL80211_BAND_5GHZ = 1, ++ NL80211_BAND_60GHZ = 2, ++ NUM_NL80211_BANDS = 3, ++}; ++ ++struct cfg80211_bss_select_adjust { ++ enum nl80211_band band; ++ s8 delta; ++}; ++ ++struct cfg80211_bss_selection { ++ enum nl80211_bss_select_attr behaviour; ++ union { ++ enum nl80211_band band_pref; ++ struct cfg80211_bss_select_adjust adjust; ++ } param; ++}; ++ ++struct cfg80211_connect_params { ++ struct ieee80211_channel *channel; ++ struct ieee80211_channel *channel_hint; ++ const u8 *bssid; ++ const u8 *bssid_hint; ++ const u8 *ssid; ++ size_t ssid_len; ++ enum nl80211_auth_type auth_type; ++ int: 32; ++ const u8 *ie; ++ size_t ie_len; ++ bool privacy; ++ int: 24; ++ enum nl80211_mfp mfp; ++ struct cfg80211_crypto_settings crypto; ++ const u8 *key; ++ u8 key_len; ++ u8 key_idx; ++ short: 16; ++ u32 flags; ++ int bg_scan_period; ++ struct ieee80211_ht_cap ht_capa; ++ struct ieee80211_ht_cap ht_capa_mask; ++ struct ieee80211_vht_cap vht_capa; ++ struct ieee80211_vht_cap vht_capa_mask; ++ bool pbss; ++ int: 24; ++ struct cfg80211_bss_selection bss_select; ++ const u8 *prev_bssid; ++ const u8 *fils_erp_username; ++ size_t fils_erp_username_len; ++ const u8 *fils_erp_realm; ++ size_t fils_erp_realm_len; ++ u16 fils_erp_next_seq_num; ++ long: 48; ++ const u8 *fils_erp_rrk; ++ size_t fils_erp_rrk_len; ++ bool want_1x; ++ long: 56; ++} __attribute__((packed)); ++ ++struct cfg80211_cqm_config; ++ ++struct wiphy; ++ ++struct wireless_dev { ++ struct wiphy *wiphy; ++ enum nl80211_iftype iftype; ++ struct list_head list; ++ struct net_device *netdev; ++ u32 identifier; ++ struct list_head mgmt_registrations; ++ spinlock_t mgmt_registrations_lock; ++ struct mutex mtx; ++ bool use_4addr; ++ bool is_running; ++ u8 address[6]; ++ u8 ssid[32]; ++ u8 ssid_len; ++ u8 mesh_id_len; ++ u8 mesh_id_up_len; ++ struct cfg80211_conn *conn; ++ struct cfg80211_cached_keys *connect_keys; ++ enum ieee80211_bss_type conn_bss_type; ++ u32 conn_owner_nlportid; ++ struct work_struct disconnect_wk; ++ u8 disconnect_bssid[6]; ++ struct list_head event_list; ++ spinlock_t event_lock; ++ struct cfg80211_internal_bss *current_bss; ++ struct cfg80211_chan_def preset_chandef; ++ struct cfg80211_chan_def chandef; ++ bool ibss_fixed; ++ bool ibss_dfs_possible; ++ bool ps; ++ int ps_timeout; ++ int 
beacon_interval; ++ u32 ap_unexpected_nlportid; ++ u32 owner_nlportid; ++ bool nl_owner_dead; ++ bool cac_started; ++ long unsigned int cac_start_time; ++ unsigned int cac_time_ms; ++ struct { ++ struct cfg80211_ibss_params ibss; ++ struct cfg80211_connect_params connect; ++ struct cfg80211_cached_keys *keys; ++ const u8 *ie; ++ size_t ie_len; ++ u8 bssid[6]; ++ u8 prev_bssid[6]; ++ u8 ssid[32]; ++ s8 default_key; ++ s8 default_mgmt_key; ++ bool prev_bssid_valid; ++ } wext; ++ struct cfg80211_cqm_config *cqm_config; ++}; ++ ++struct iw_param { ++ __s32 value; ++ __u8 fixed; ++ __u8 disabled; ++ __u16 flags; ++}; ++ ++struct iw_point { ++ void *pointer; ++ __u16 length; ++ __u16 flags; ++}; ++ ++struct iw_freq { ++ __s32 m; ++ __s16 e; ++ __u8 i; ++ __u8 flags; ++}; ++ ++struct iw_quality { ++ __u8 qual; ++ __u8 level; ++ __u8 noise; ++ __u8 updated; ++}; ++ ++struct iw_discarded { ++ __u32 nwid; ++ __u32 code; ++ __u32 fragment; ++ __u32 retries; ++ __u32 misc; ++}; ++ ++struct iw_missed { ++ __u32 beacon; ++}; ++ ++struct iw_encode_ext { ++ __u32 ext_flags; ++ __u8 tx_seq[8]; ++ __u8 rx_seq[8]; ++ struct sockaddr addr; ++ __u16 alg; ++ __u16 key_len; ++ __u8 key[0]; ++}; ++ ++struct iw_statistics { ++ __u16 status; ++ struct iw_quality qual; ++ struct iw_discarded discard; ++ struct iw_missed miss; ++}; ++ ++union iwreq_data { ++ char name[16]; ++ struct iw_point essid; ++ struct iw_param nwid; ++ struct iw_freq freq; ++ struct iw_param sens; ++ struct iw_param bitrate; ++ struct iw_param txpower; ++ struct iw_param rts; ++ struct iw_param frag; ++ __u32 mode; ++ struct iw_param retry; ++ struct iw_point encoding; ++ struct iw_param power; ++ struct iw_quality qual; ++ struct sockaddr ap_addr; ++ struct sockaddr addr; ++ struct iw_param param; ++ struct iw_point data; ++}; ++ ++struct iwreq { ++ union { ++ char ifrn_name[16]; ++ } ifr_ifrn; ++ union iwreq_data u; ++}; ++ ++struct iw_event { ++ __u16 len; ++ __u16 cmd; ++ union iwreq_data u; ++}; ++ ++struct compat_iw_point { ++ compat_caddr_t pointer; ++ __u16 length; ++ __u16 flags; ++}; ++ ++struct __compat_iw_event { ++ __u16 len; ++ __u16 cmd; ++ compat_caddr_t pointer; ++}; ++ ++enum nl80211_reg_initiator { ++ NL80211_REGDOM_SET_BY_CORE = 0, ++ NL80211_REGDOM_SET_BY_USER = 1, ++ NL80211_REGDOM_SET_BY_DRIVER = 2, ++ NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, ++}; ++ ++enum nl80211_dfs_regions { ++ NL80211_DFS_UNSET = 0, ++ NL80211_DFS_FCC = 1, ++ NL80211_DFS_ETSI = 2, ++ NL80211_DFS_JP = 3, ++}; ++ ++enum nl80211_user_reg_hint_type { ++ NL80211_USER_REG_HINT_USER = 0, ++ NL80211_USER_REG_HINT_CELL_BASE = 1, ++ NL80211_USER_REG_HINT_INDOOR = 2, ++}; ++ ++enum nl80211_mntr_flags { ++ __NL80211_MNTR_FLAG_INVALID = 0, ++ NL80211_MNTR_FLAG_FCSFAIL = 1, ++ NL80211_MNTR_FLAG_PLCPFAIL = 2, ++ NL80211_MNTR_FLAG_CONTROL = 3, ++ NL80211_MNTR_FLAG_OTHER_BSS = 4, ++ NL80211_MNTR_FLAG_COOK_FRAMES = 5, ++ NL80211_MNTR_FLAG_ACTIVE = 6, ++ __NL80211_MNTR_FLAG_AFTER_LAST = 7, ++ NL80211_MNTR_FLAG_MAX = 6, ++}; ++ ++enum nl80211_bss_scan_width { ++ NL80211_BSS_CHAN_WIDTH_20 = 0, ++ NL80211_BSS_CHAN_WIDTH_10 = 1, ++ NL80211_BSS_CHAN_WIDTH_5 = 2, ++}; ++ ++struct nl80211_wowlan_tcp_data_seq { ++ __u32 start; ++ __u32 offset; ++ __u32 len; ++}; ++ ++struct nl80211_wowlan_tcp_data_token { ++ __u32 offset; ++ __u32 len; ++ __u8 token_stream[0]; ++}; ++ ++struct nl80211_wowlan_tcp_data_token_feature { ++ __u32 min_len; ++ __u32 max_len; ++ __u32 bufsize; ++}; ++ ++enum nl80211_ext_feature_index { ++ NL80211_EXT_FEATURE_VHT_IBSS = 0, ++ NL80211_EXT_FEATURE_RRM = 1, 
++ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 2, ++ NL80211_EXT_FEATURE_SCAN_START_TIME = 3, ++ NL80211_EXT_FEATURE_BSS_PARENT_TSF = 4, ++ NL80211_EXT_FEATURE_SET_SCAN_DWELL = 5, ++ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 6, ++ NL80211_EXT_FEATURE_BEACON_RATE_HT = 7, ++ NL80211_EXT_FEATURE_BEACON_RATE_VHT = 8, ++ NL80211_EXT_FEATURE_FILS_STA = 9, ++ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 10, ++ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 11, ++ NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 12, ++ NL80211_EXT_FEATURE_CQM_RSSI_LIST = 13, ++ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 14, ++ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 15, ++ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 16, ++ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 17, ++ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 18, ++ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 19, ++ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 20, ++ NL80211_EXT_FEATURE_MFP_OPTIONAL = 21, ++ NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 22, ++ NL80211_EXT_FEATURE_LOW_POWER_SCAN = 23, ++ NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 24, ++ NL80211_EXT_FEATURE_DFS_OFFLOAD = 25, ++ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 26, ++ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 27, ++ NL80211_EXT_FEATURE_TXQS = 28, ++ NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 29, ++ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 30, ++ NUM_NL80211_EXT_FEATURES = 31, ++ MAX_NL80211_EXT_FEATURES = 30, ++}; ++ ++enum nl80211_dfs_state { ++ NL80211_DFS_USABLE = 0, ++ NL80211_DFS_UNAVAILABLE = 1, ++ NL80211_DFS_AVAILABLE = 2, ++}; ++ ++struct nl80211_vendor_cmd_info { ++ __u32 vendor_id; ++ __u32 subcmd; ++}; ++ ++struct ieee80211_he_cap_elem { ++ u8 mac_cap_info[5]; ++ u8 phy_cap_info[9]; ++}; ++ ++struct ieee80211_he_mcs_nss_supp { ++ __le16 rx_mcs_80; ++ __le16 tx_mcs_80; ++ __le16 rx_mcs_160; ++ __le16 tx_mcs_160; ++ __le16 rx_mcs_80p80; ++ __le16 tx_mcs_80p80; ++}; ++ ++enum environment_cap { ++ ENVIRON_ANY = 0, ++ ENVIRON_INDOOR = 1, ++ ENVIRON_OUTDOOR = 2, ++}; ++ ++struct regulatory_request { ++ struct callback_head callback_head; ++ int wiphy_idx; ++ enum nl80211_reg_initiator initiator; ++ enum nl80211_user_reg_hint_type user_reg_hint_type; ++ char alpha2[3]; ++ enum nl80211_dfs_regions dfs_region; ++ bool intersect; ++ bool processed; ++ enum environment_cap country_ie_env; ++ struct list_head list; ++}; ++ ++struct ieee80211_freq_range { ++ u32 start_freq_khz; ++ u32 end_freq_khz; ++ u32 max_bandwidth_khz; ++}; ++ ++struct ieee80211_power_rule { ++ u32 max_antenna_gain; ++ u32 max_eirp; ++}; ++ ++struct ieee80211_wmm_ac { ++ u16 cw_min; ++ u16 cw_max; ++ u16 cot; ++ u8 aifsn; ++}; ++ ++struct ieee80211_wmm_rule { ++ struct ieee80211_wmm_ac client[4]; ++ struct ieee80211_wmm_ac ap[4]; ++}; ++ ++struct ieee80211_reg_rule { ++ struct ieee80211_freq_range freq_range; ++ struct ieee80211_power_rule power_rule; ++ struct ieee80211_wmm_rule wmm_rule; ++ u32 flags; ++ u32 dfs_cac_ms; ++ bool has_wmm; ++}; ++ ++struct ieee80211_regdomain { ++ struct callback_head callback_head; ++ u32 n_reg_rules; ++ char alpha2[3]; ++ enum nl80211_dfs_regions dfs_region; ++ struct ieee80211_reg_rule reg_rules[0]; ++}; ++ ++struct ieee80211_channel { ++ enum nl80211_band band; ++ u16 center_freq; ++ u16 hw_value; ++ u32 flags; ++ int max_antenna_gain; ++ int max_power; ++ int max_reg_power; ++ bool beacon_found; ++ u32 orig_flags; ++ int orig_mag; ++ int orig_mpwr; ++ enum nl80211_dfs_state dfs_state; ++ long unsigned int dfs_state_entered; ++ unsigned int dfs_cac_ms; ++}; ++ ++struct 
ieee80211_rate { ++ u32 flags; ++ u16 bitrate; ++ u16 hw_value; ++ u16 hw_value_short; ++}; ++ ++struct ieee80211_sta_ht_cap { ++ u16 cap; ++ bool ht_supported; ++ u8 ampdu_factor; ++ u8 ampdu_density; ++ struct ieee80211_mcs_info mcs; ++ char: 8; ++} __attribute__((packed)); ++ ++struct ieee80211_sta_vht_cap { ++ bool vht_supported; ++ u32 cap; ++ struct ieee80211_vht_mcs_info vht_mcs; ++}; ++ ++struct ieee80211_sta_he_cap { ++ bool has_he; ++ struct ieee80211_he_cap_elem he_cap_elem; ++ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; ++ u8 ppe_thres[25]; ++} __attribute__((packed)); ++ ++struct ieee80211_sband_iftype_data { ++ u16 types_mask; ++ struct ieee80211_sta_he_cap he_cap; ++}; ++ ++struct ieee80211_supported_band { ++ struct ieee80211_channel *channels; ++ struct ieee80211_rate *bitrates; ++ enum nl80211_band band; ++ int n_channels; ++ int n_bitrates; ++ struct ieee80211_sta_ht_cap ht_cap; ++ struct ieee80211_sta_vht_cap vht_cap; ++ u16 n_iftype_data; ++ const struct ieee80211_sband_iftype_data *iftype_data; ++}; ++ ++struct key_params { ++ const u8 *key; ++ const u8 *seq; ++ int key_len; ++ int seq_len; ++ u32 cipher; ++}; ++ ++struct mac_address { ++ u8 addr[6]; ++}; ++ ++struct cfg80211_ssid { ++ u8 ssid[32]; ++ u8 ssid_len; ++}; ++ ++enum cfg80211_signal_type { ++ CFG80211_SIGNAL_TYPE_NONE = 0, ++ CFG80211_SIGNAL_TYPE_MBM = 1, ++ CFG80211_SIGNAL_TYPE_UNSPEC = 2, ++}; ++ ++struct ieee80211_txrx_stypes; ++ ++struct ieee80211_iface_combination; ++ ++struct wiphy_wowlan_support; ++ ++struct cfg80211_wowlan; ++ ++struct wiphy_iftype_ext_capab; ++ ++struct iw_handler_def; ++ ++struct wiphy_coalesce_support; ++ ++struct wiphy_vendor_command; ++ ++struct wiphy { ++ u8 perm_addr[6]; ++ u8 addr_mask[6]; ++ struct mac_address *addresses; ++ const struct ieee80211_txrx_stypes *mgmt_stypes; ++ const struct ieee80211_iface_combination *iface_combinations; ++ int n_iface_combinations; ++ u16 software_iftypes; ++ u16 n_addresses; ++ u16 interface_modes; ++ u16 max_acl_mac_addrs; ++ u32 flags; ++ u32 regulatory_flags; ++ u32 features; ++ u8 ext_features[4]; ++ u32 ap_sme_capa; ++ enum cfg80211_signal_type signal_type; ++ int bss_priv_size; ++ u8 max_scan_ssids; ++ u8 max_sched_scan_reqs; ++ u8 max_sched_scan_ssids; ++ u8 max_match_sets; ++ u16 max_scan_ie_len; ++ u16 max_sched_scan_ie_len; ++ u32 max_sched_scan_plans; ++ u32 max_sched_scan_plan_interval; ++ u32 max_sched_scan_plan_iterations; ++ int n_cipher_suites; ++ const u32 *cipher_suites; ++ u8 retry_short; ++ u8 retry_long; ++ u32 frag_threshold; ++ u32 rts_threshold; ++ u8 coverage_class; ++ char fw_version[32]; ++ u32 hw_version; ++ const struct wiphy_wowlan_support *wowlan; ++ struct cfg80211_wowlan *wowlan_config; ++ u16 max_remain_on_channel_duration; ++ u8 max_num_pmkids; ++ u32 available_antennas_tx; ++ u32 available_antennas_rx; ++ u32 probe_resp_offload; ++ const u8 *extended_capabilities; ++ const u8 *extended_capabilities_mask; ++ u8 extended_capabilities_len; ++ const struct wiphy_iftype_ext_capab *iftype_ext_capab; ++ unsigned int num_iftype_ext_capab; ++ const void *privid; ++ struct ieee80211_supported_band *bands[3]; ++ void (*reg_notifier)(struct wiphy *, struct regulatory_request *); ++ const struct ieee80211_regdomain *regd; ++ struct device dev; ++ bool registered; ++ struct dentry *debugfsdir; ++ const struct ieee80211_ht_cap *ht_capa_mod_mask; ++ const struct ieee80211_vht_cap *vht_capa_mod_mask; ++ struct list_head wdev_list; ++ possible_net_t _net; ++ const struct iw_handler_def *wext; ++ const struct 
wiphy_coalesce_support *coalesce; ++ const struct wiphy_vendor_command *vendor_commands; ++ const struct nl80211_vendor_cmd_info *vendor_events; ++ int n_vendor_commands; ++ int n_vendor_events; ++ u16 max_ap_assoc_sta; ++ u8 max_num_csa_counters; ++ u8 max_adj_channel_rssi_comp; ++ u32 bss_select_support; ++ u64 cookie_counter; ++ u8 nan_supported_bands; ++ u32 txq_limit; ++ u32 txq_memory_limit; ++ u32 txq_quantum; ++ long: 64; ++ char priv[0]; ++}; ++ ++struct cfg80211_match_set { ++ struct cfg80211_ssid ssid; ++ u8 bssid[6]; ++ s32 rssi_thold; ++}; ++ ++struct cfg80211_sched_scan_plan { ++ u32 interval; ++ u32 iterations; ++}; ++ ++struct cfg80211_sched_scan_request { ++ u64 reqid; ++ struct cfg80211_ssid *ssids; ++ int n_ssids; ++ u32 n_channels; ++ enum nl80211_bss_scan_width scan_width; ++ const u8 *ie; ++ size_t ie_len; ++ u32 flags; ++ struct cfg80211_match_set *match_sets; ++ int n_match_sets; ++ s32 min_rssi_thold; ++ u32 delay; ++ struct cfg80211_sched_scan_plan *scan_plans; ++ int n_scan_plans; ++ u8 mac_addr[6]; ++ u8 mac_addr_mask[6]; ++ bool relative_rssi_set; ++ s8 relative_rssi; ++ struct cfg80211_bss_select_adjust rssi_adjust; ++ struct wiphy *wiphy; ++ struct net_device *dev; ++ long unsigned int scan_start; ++ bool report_results; ++ struct callback_head callback_head; ++ u32 owner_nlportid; ++ bool nl_owner_dead; ++ struct list_head list; ++ struct ieee80211_channel *channels[0]; ++}; ++ ++struct cfg80211_pkt_pattern { ++ const u8 *mask; ++ const u8 *pattern; ++ int pattern_len; ++ int pkt_offset; ++}; ++ ++struct cfg80211_wowlan_tcp { ++ struct socket *sock; ++ __be32 src; ++ __be32 dst; ++ u16 src_port; ++ u16 dst_port; ++ u8 dst_mac[6]; ++ int payload_len; ++ const u8 *payload; ++ struct nl80211_wowlan_tcp_data_seq payload_seq; ++ u32 data_interval; ++ u32 wake_len; ++ const u8 *wake_data; ++ const u8 *wake_mask; ++ u32 tokens_size; ++ struct nl80211_wowlan_tcp_data_token payload_tok; ++}; ++ ++struct cfg80211_wowlan { ++ bool any; ++ bool disconnect; ++ bool magic_pkt; ++ bool gtk_rekey_failure; ++ bool eap_identity_req; ++ bool four_way_handshake; ++ bool rfkill_release; ++ struct cfg80211_pkt_pattern *patterns; ++ struct cfg80211_wowlan_tcp *tcp; ++ int n_patterns; ++ struct cfg80211_sched_scan_request *nd_config; ++}; ++ ++struct ieee80211_iface_limit { ++ u16 max; ++ u16 types; ++}; ++ ++struct ieee80211_iface_combination { ++ const struct ieee80211_iface_limit *limits; ++ u32 num_different_channels; ++ u16 max_interfaces; ++ u8 n_limits; ++ bool beacon_int_infra_match; ++ u8 radar_detect_widths; ++ u8 radar_detect_regions; ++ u32 beacon_int_min_gcd; ++}; ++ ++struct ieee80211_txrx_stypes { ++ u16 tx; ++ u16 rx; ++}; ++ ++struct wiphy_wowlan_tcp_support { ++ const struct nl80211_wowlan_tcp_data_token_feature *tok; ++ u32 data_payload_max; ++ u32 data_interval_max; ++ u32 wake_payload_max; ++ bool seq; ++}; ++ ++struct wiphy_wowlan_support { ++ u32 flags; ++ int n_patterns; ++ int pattern_max_len; ++ int pattern_min_len; ++ int max_pkt_offset; ++ int max_nd_match_sets; ++ const struct wiphy_wowlan_tcp_support *tcp; ++}; ++ ++struct wiphy_coalesce_support { ++ int n_rules; ++ int max_delay; ++ int n_patterns; ++ int pattern_max_len; ++ int pattern_min_len; ++ int max_pkt_offset; ++}; ++ ++struct wiphy_vendor_command { ++ struct nl80211_vendor_cmd_info info; ++ u32 flags; ++ int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); ++ int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, long unsigned int *); ++}; 
++ ++struct wiphy_iftype_ext_capab { ++ enum nl80211_iftype iftype; ++ const u8 *extended_capabilities; ++ const u8 *extended_capabilities_mask; ++ u8 extended_capabilities_len; ++}; ++ ++struct iw_request_info; ++ ++typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *); ++ ++struct iw_handler_def { ++ const iw_handler *standard; ++ __u16 num_standard; ++ struct iw_statistics * (*get_wireless_stats)(struct net_device *); ++}; ++ ++struct iw_request_info { ++ __u16 cmd; ++ __u16 flags; ++}; ++ ++struct iw_ioctl_description { ++ __u8 header_type; ++ __u8 token_type; ++ __u16 token_size; ++ __u16 min_tokens; ++ __u16 max_tokens; ++ __u32 flags; ++}; ++ ++typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); ++ ++struct netlbl_af4list { ++ __be32 addr; ++ __be32 mask; ++ u32 valid; ++ struct list_head list; ++}; ++ ++struct netlbl_af6list { ++ struct in6_addr addr; ++ struct in6_addr mask; ++ u32 valid; ++ struct list_head list; ++}; ++ ++struct netlbl_domaddr_map { ++ struct list_head list4; ++ struct list_head list6; ++}; ++ ++struct netlbl_dommap_def { ++ u32 type; ++ union { ++ struct netlbl_domaddr_map *addrsel; ++ struct cipso_v4_doi *cipso; ++ struct calipso_doi *calipso; ++ }; ++}; ++ ++struct netlbl_domaddr4_map { ++ struct netlbl_dommap_def def; ++ struct netlbl_af4list list; ++}; ++ ++struct netlbl_domaddr6_map { ++ struct netlbl_dommap_def def; ++ struct netlbl_af6list list; ++}; ++ ++struct netlbl_dom_map { ++ char *domain; ++ u16 family; ++ struct netlbl_dommap_def def; ++ u32 valid; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct netlbl_domhsh_tbl { ++ struct list_head *tbl; ++ u32 size; ++}; ++ ++enum { ++ NLBL_MGMT_C_UNSPEC = 0, ++ NLBL_MGMT_C_ADD = 1, ++ NLBL_MGMT_C_REMOVE = 2, ++ NLBL_MGMT_C_LISTALL = 3, ++ NLBL_MGMT_C_ADDDEF = 4, ++ NLBL_MGMT_C_REMOVEDEF = 5, ++ NLBL_MGMT_C_LISTDEF = 6, ++ NLBL_MGMT_C_PROTOCOLS = 7, ++ NLBL_MGMT_C_VERSION = 8, ++ __NLBL_MGMT_C_MAX = 9, ++}; ++ ++enum { ++ NLBL_MGMT_A_UNSPEC = 0, ++ NLBL_MGMT_A_DOMAIN = 1, ++ NLBL_MGMT_A_PROTOCOL = 2, ++ NLBL_MGMT_A_VERSION = 3, ++ NLBL_MGMT_A_CV4DOI = 4, ++ NLBL_MGMT_A_IPV6ADDR = 5, ++ NLBL_MGMT_A_IPV6MASK = 6, ++ NLBL_MGMT_A_IPV4ADDR = 7, ++ NLBL_MGMT_A_IPV4MASK = 8, ++ NLBL_MGMT_A_ADDRSELECTOR = 9, ++ NLBL_MGMT_A_SELECTORLIST = 10, ++ NLBL_MGMT_A_FAMILY = 11, ++ NLBL_MGMT_A_CLPDOI = 12, ++ __NLBL_MGMT_A_MAX = 13, ++}; ++ ++struct netlbl_domhsh_walk_arg { ++ struct netlink_callback *nl_cb; ++ struct sk_buff *skb; ++ u32 seq; ++}; ++ ++enum { ++ NLBL_UNLABEL_C_UNSPEC = 0, ++ NLBL_UNLABEL_C_ACCEPT = 1, ++ NLBL_UNLABEL_C_LIST = 2, ++ NLBL_UNLABEL_C_STATICADD = 3, ++ NLBL_UNLABEL_C_STATICREMOVE = 4, ++ NLBL_UNLABEL_C_STATICLIST = 5, ++ NLBL_UNLABEL_C_STATICADDDEF = 6, ++ NLBL_UNLABEL_C_STATICREMOVEDEF = 7, ++ NLBL_UNLABEL_C_STATICLISTDEF = 8, ++ __NLBL_UNLABEL_C_MAX = 9, ++}; ++ ++enum { ++ NLBL_UNLABEL_A_UNSPEC = 0, ++ NLBL_UNLABEL_A_ACPTFLG = 1, ++ NLBL_UNLABEL_A_IPV6ADDR = 2, ++ NLBL_UNLABEL_A_IPV6MASK = 3, ++ NLBL_UNLABEL_A_IPV4ADDR = 4, ++ NLBL_UNLABEL_A_IPV4MASK = 5, ++ NLBL_UNLABEL_A_IFACE = 6, ++ NLBL_UNLABEL_A_SECCTX = 7, ++ __NLBL_UNLABEL_A_MAX = 8, ++}; ++ ++struct netlbl_unlhsh_tbl { ++ struct list_head *tbl; ++ u32 size; ++}; ++ ++struct netlbl_unlhsh_addr4 { ++ u32 secid; ++ struct netlbl_af4list list; ++ struct callback_head rcu; ++}; ++ ++struct netlbl_unlhsh_addr6 { ++ u32 secid; ++ struct netlbl_af6list list; ++ struct callback_head rcu; ++}; ++ ++struct 
netlbl_unlhsh_iface { ++ int ifindex; ++ struct list_head addr4_list; ++ struct list_head addr6_list; ++ u32 valid; ++ struct list_head list; ++ struct callback_head rcu; ++}; ++ ++struct netlbl_unlhsh_walk_arg { ++ struct netlink_callback *nl_cb; ++ struct sk_buff *skb; ++ u32 seq; ++}; ++ ++enum { ++ NLBL_CIPSOV4_C_UNSPEC = 0, ++ NLBL_CIPSOV4_C_ADD = 1, ++ NLBL_CIPSOV4_C_REMOVE = 2, ++ NLBL_CIPSOV4_C_LIST = 3, ++ NLBL_CIPSOV4_C_LISTALL = 4, ++ __NLBL_CIPSOV4_C_MAX = 5, ++}; ++ ++enum { ++ NLBL_CIPSOV4_A_UNSPEC = 0, ++ NLBL_CIPSOV4_A_DOI = 1, ++ NLBL_CIPSOV4_A_MTYPE = 2, ++ NLBL_CIPSOV4_A_TAG = 3, ++ NLBL_CIPSOV4_A_TAGLST = 4, ++ NLBL_CIPSOV4_A_MLSLVLLOC = 5, ++ NLBL_CIPSOV4_A_MLSLVLREM = 6, ++ NLBL_CIPSOV4_A_MLSLVL = 7, ++ NLBL_CIPSOV4_A_MLSLVLLST = 8, ++ NLBL_CIPSOV4_A_MLSCATLOC = 9, ++ NLBL_CIPSOV4_A_MLSCATREM = 10, ++ NLBL_CIPSOV4_A_MLSCAT = 11, ++ NLBL_CIPSOV4_A_MLSCATLST = 12, ++ __NLBL_CIPSOV4_A_MAX = 13, ++}; ++ ++struct netlbl_cipsov4_doiwalk_arg { ++ struct netlink_callback *nl_cb; ++ struct sk_buff *skb; ++ u32 seq; ++}; ++ ++struct netlbl_domhsh_walk_arg___2 { ++ struct netlbl_audit *audit_info; ++ u32 doi; ++}; ++ ++enum { ++ NLBL_CALIPSO_C_UNSPEC = 0, ++ NLBL_CALIPSO_C_ADD = 1, ++ NLBL_CALIPSO_C_REMOVE = 2, ++ NLBL_CALIPSO_C_LIST = 3, ++ NLBL_CALIPSO_C_LISTALL = 4, ++ __NLBL_CALIPSO_C_MAX = 5, ++}; ++ ++enum { ++ NLBL_CALIPSO_A_UNSPEC = 0, ++ NLBL_CALIPSO_A_DOI = 1, ++ NLBL_CALIPSO_A_MTYPE = 2, ++ __NLBL_CALIPSO_A_MAX = 3, ++}; ++ ++struct netlbl_calipso_doiwalk_arg { ++ struct netlink_callback *nl_cb; ++ struct sk_buff *skb; ++ u32 seq; ++}; ++ ++struct dcbmsg { ++ __u8 dcb_family; ++ __u8 cmd; ++ __u16 dcb_pad; ++}; ++ ++enum dcbnl_commands { ++ DCB_CMD_UNDEFINED = 0, ++ DCB_CMD_GSTATE = 1, ++ DCB_CMD_SSTATE = 2, ++ DCB_CMD_PGTX_GCFG = 3, ++ DCB_CMD_PGTX_SCFG = 4, ++ DCB_CMD_PGRX_GCFG = 5, ++ DCB_CMD_PGRX_SCFG = 6, ++ DCB_CMD_PFC_GCFG = 7, ++ DCB_CMD_PFC_SCFG = 8, ++ DCB_CMD_SET_ALL = 9, ++ DCB_CMD_GPERM_HWADDR = 10, ++ DCB_CMD_GCAP = 11, ++ DCB_CMD_GNUMTCS = 12, ++ DCB_CMD_SNUMTCS = 13, ++ DCB_CMD_PFC_GSTATE = 14, ++ DCB_CMD_PFC_SSTATE = 15, ++ DCB_CMD_BCN_GCFG = 16, ++ DCB_CMD_BCN_SCFG = 17, ++ DCB_CMD_GAPP = 18, ++ DCB_CMD_SAPP = 19, ++ DCB_CMD_IEEE_SET = 20, ++ DCB_CMD_IEEE_GET = 21, ++ DCB_CMD_GDCBX = 22, ++ DCB_CMD_SDCBX = 23, ++ DCB_CMD_GFEATCFG = 24, ++ DCB_CMD_SFEATCFG = 25, ++ DCB_CMD_CEE_GET = 26, ++ DCB_CMD_IEEE_DEL = 27, ++ __DCB_CMD_ENUM_MAX = 28, ++ DCB_CMD_MAX = 27, ++}; ++ ++enum dcbnl_attrs { ++ DCB_ATTR_UNDEFINED = 0, ++ DCB_ATTR_IFNAME = 1, ++ DCB_ATTR_STATE = 2, ++ DCB_ATTR_PFC_STATE = 3, ++ DCB_ATTR_PFC_CFG = 4, ++ DCB_ATTR_NUM_TC = 5, ++ DCB_ATTR_PG_CFG = 6, ++ DCB_ATTR_SET_ALL = 7, ++ DCB_ATTR_PERM_HWADDR = 8, ++ DCB_ATTR_CAP = 9, ++ DCB_ATTR_NUMTCS = 10, ++ DCB_ATTR_BCN = 11, ++ DCB_ATTR_APP = 12, ++ DCB_ATTR_IEEE = 13, ++ DCB_ATTR_DCBX = 14, ++ DCB_ATTR_FEATCFG = 15, ++ DCB_ATTR_CEE = 16, ++ __DCB_ATTR_ENUM_MAX = 17, ++ DCB_ATTR_MAX = 16, ++}; ++ ++enum ieee_attrs { ++ DCB_ATTR_IEEE_UNSPEC = 0, ++ DCB_ATTR_IEEE_ETS = 1, ++ DCB_ATTR_IEEE_PFC = 2, ++ DCB_ATTR_IEEE_APP_TABLE = 3, ++ DCB_ATTR_IEEE_PEER_ETS = 4, ++ DCB_ATTR_IEEE_PEER_PFC = 5, ++ DCB_ATTR_IEEE_PEER_APP = 6, ++ DCB_ATTR_IEEE_MAXRATE = 7, ++ DCB_ATTR_IEEE_QCN = 8, ++ DCB_ATTR_IEEE_QCN_STATS = 9, ++ DCB_ATTR_DCB_BUFFER = 10, ++ __DCB_ATTR_IEEE_MAX = 11, ++}; ++ ++enum ieee_attrs_app { ++ DCB_ATTR_IEEE_APP_UNSPEC = 0, ++ DCB_ATTR_IEEE_APP = 1, ++ __DCB_ATTR_IEEE_APP_MAX = 2, ++}; ++ ++enum cee_attrs { ++ DCB_ATTR_CEE_UNSPEC = 0, ++ DCB_ATTR_CEE_PEER_PG = 1, ++ DCB_ATTR_CEE_PEER_PFC = 2, ++ 
DCB_ATTR_CEE_PEER_APP_TABLE = 3, ++ DCB_ATTR_CEE_TX_PG = 4, ++ DCB_ATTR_CEE_RX_PG = 5, ++ DCB_ATTR_CEE_PFC = 6, ++ DCB_ATTR_CEE_APP_TABLE = 7, ++ DCB_ATTR_CEE_FEAT = 8, ++ __DCB_ATTR_CEE_MAX = 9, ++}; ++ ++enum peer_app_attr { ++ DCB_ATTR_CEE_PEER_APP_UNSPEC = 0, ++ DCB_ATTR_CEE_PEER_APP_INFO = 1, ++ DCB_ATTR_CEE_PEER_APP = 2, ++ __DCB_ATTR_CEE_PEER_APP_MAX = 3, ++}; ++ ++enum dcbnl_pfc_up_attrs { ++ DCB_PFC_UP_ATTR_UNDEFINED = 0, ++ DCB_PFC_UP_ATTR_0 = 1, ++ DCB_PFC_UP_ATTR_1 = 2, ++ DCB_PFC_UP_ATTR_2 = 3, ++ DCB_PFC_UP_ATTR_3 = 4, ++ DCB_PFC_UP_ATTR_4 = 5, ++ DCB_PFC_UP_ATTR_5 = 6, ++ DCB_PFC_UP_ATTR_6 = 7, ++ DCB_PFC_UP_ATTR_7 = 8, ++ DCB_PFC_UP_ATTR_ALL = 9, ++ __DCB_PFC_UP_ATTR_ENUM_MAX = 10, ++ DCB_PFC_UP_ATTR_MAX = 9, ++}; ++ ++enum dcbnl_pg_attrs { ++ DCB_PG_ATTR_UNDEFINED = 0, ++ DCB_PG_ATTR_TC_0 = 1, ++ DCB_PG_ATTR_TC_1 = 2, ++ DCB_PG_ATTR_TC_2 = 3, ++ DCB_PG_ATTR_TC_3 = 4, ++ DCB_PG_ATTR_TC_4 = 5, ++ DCB_PG_ATTR_TC_5 = 6, ++ DCB_PG_ATTR_TC_6 = 7, ++ DCB_PG_ATTR_TC_7 = 8, ++ DCB_PG_ATTR_TC_MAX = 9, ++ DCB_PG_ATTR_TC_ALL = 10, ++ DCB_PG_ATTR_BW_ID_0 = 11, ++ DCB_PG_ATTR_BW_ID_1 = 12, ++ DCB_PG_ATTR_BW_ID_2 = 13, ++ DCB_PG_ATTR_BW_ID_3 = 14, ++ DCB_PG_ATTR_BW_ID_4 = 15, ++ DCB_PG_ATTR_BW_ID_5 = 16, ++ DCB_PG_ATTR_BW_ID_6 = 17, ++ DCB_PG_ATTR_BW_ID_7 = 18, ++ DCB_PG_ATTR_BW_ID_MAX = 19, ++ DCB_PG_ATTR_BW_ID_ALL = 20, ++ __DCB_PG_ATTR_ENUM_MAX = 21, ++ DCB_PG_ATTR_MAX = 20, ++}; ++ ++enum dcbnl_tc_attrs { ++ DCB_TC_ATTR_PARAM_UNDEFINED = 0, ++ DCB_TC_ATTR_PARAM_PGID = 1, ++ DCB_TC_ATTR_PARAM_UP_MAPPING = 2, ++ DCB_TC_ATTR_PARAM_STRICT_PRIO = 3, ++ DCB_TC_ATTR_PARAM_BW_PCT = 4, ++ DCB_TC_ATTR_PARAM_ALL = 5, ++ __DCB_TC_ATTR_PARAM_ENUM_MAX = 6, ++ DCB_TC_ATTR_PARAM_MAX = 5, ++}; ++ ++enum dcbnl_cap_attrs { ++ DCB_CAP_ATTR_UNDEFINED = 0, ++ DCB_CAP_ATTR_ALL = 1, ++ DCB_CAP_ATTR_PG = 2, ++ DCB_CAP_ATTR_PFC = 3, ++ DCB_CAP_ATTR_UP2TC = 4, ++ DCB_CAP_ATTR_PG_TCS = 5, ++ DCB_CAP_ATTR_PFC_TCS = 6, ++ DCB_CAP_ATTR_GSP = 7, ++ DCB_CAP_ATTR_BCN = 8, ++ DCB_CAP_ATTR_DCBX = 9, ++ __DCB_CAP_ATTR_ENUM_MAX = 10, ++ DCB_CAP_ATTR_MAX = 9, ++}; ++ ++enum dcbnl_numtcs_attrs { ++ DCB_NUMTCS_ATTR_UNDEFINED = 0, ++ DCB_NUMTCS_ATTR_ALL = 1, ++ DCB_NUMTCS_ATTR_PG = 2, ++ DCB_NUMTCS_ATTR_PFC = 3, ++ __DCB_NUMTCS_ATTR_ENUM_MAX = 4, ++ DCB_NUMTCS_ATTR_MAX = 3, ++}; ++ ++enum dcbnl_bcn_attrs { ++ DCB_BCN_ATTR_UNDEFINED = 0, ++ DCB_BCN_ATTR_RP_0 = 1, ++ DCB_BCN_ATTR_RP_1 = 2, ++ DCB_BCN_ATTR_RP_2 = 3, ++ DCB_BCN_ATTR_RP_3 = 4, ++ DCB_BCN_ATTR_RP_4 = 5, ++ DCB_BCN_ATTR_RP_5 = 6, ++ DCB_BCN_ATTR_RP_6 = 7, ++ DCB_BCN_ATTR_RP_7 = 8, ++ DCB_BCN_ATTR_RP_ALL = 9, ++ DCB_BCN_ATTR_BCNA_0 = 10, ++ DCB_BCN_ATTR_BCNA_1 = 11, ++ DCB_BCN_ATTR_ALPHA = 12, ++ DCB_BCN_ATTR_BETA = 13, ++ DCB_BCN_ATTR_GD = 14, ++ DCB_BCN_ATTR_GI = 15, ++ DCB_BCN_ATTR_TMAX = 16, ++ DCB_BCN_ATTR_TD = 17, ++ DCB_BCN_ATTR_RMIN = 18, ++ DCB_BCN_ATTR_W = 19, ++ DCB_BCN_ATTR_RD = 20, ++ DCB_BCN_ATTR_RU = 21, ++ DCB_BCN_ATTR_WRTT = 22, ++ DCB_BCN_ATTR_RI = 23, ++ DCB_BCN_ATTR_C = 24, ++ DCB_BCN_ATTR_ALL = 25, ++ __DCB_BCN_ATTR_ENUM_MAX = 26, ++ DCB_BCN_ATTR_MAX = 25, ++}; ++ ++enum dcb_general_attr_values { ++ DCB_ATTR_VALUE_UNDEFINED = 255, ++}; ++ ++enum dcbnl_app_attrs { ++ DCB_APP_ATTR_UNDEFINED = 0, ++ DCB_APP_ATTR_IDTYPE = 1, ++ DCB_APP_ATTR_ID = 2, ++ DCB_APP_ATTR_PRIORITY = 3, ++ __DCB_APP_ATTR_ENUM_MAX = 4, ++ DCB_APP_ATTR_MAX = 3, ++}; ++ ++enum dcbnl_featcfg_attrs { ++ DCB_FEATCFG_ATTR_UNDEFINED = 0, ++ DCB_FEATCFG_ATTR_ALL = 1, ++ DCB_FEATCFG_ATTR_PG = 2, ++ DCB_FEATCFG_ATTR_PFC = 3, ++ DCB_FEATCFG_ATTR_APP = 4, ++ __DCB_FEATCFG_ATTR_ENUM_MAX = 
5, ++ DCB_FEATCFG_ATTR_MAX = 4, ++}; ++ ++struct dcb_app_type { ++ int ifindex; ++ struct dcb_app app; ++ struct list_head list; ++ u8 dcbx; ++}; ++ ++struct dcb_ieee_app_prio_map { ++ u64 map[8]; ++}; ++ ++struct dcb_ieee_app_dscp_map { ++ u8 map[64]; ++}; ++ ++enum dcbevent_notif_type { ++ DCB_APP_EVENT = 1, ++}; ++ ++struct reply_func { ++ int type; ++ int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); ++}; ++ ++struct switchdev_trans_item { ++ struct list_head list; ++ void *data; ++ void (*destructor)(const void *); ++}; ++ ++struct switchdev_notifier_info { ++ struct net_device *dev; ++}; ++ ++typedef void switchdev_deferred_func_t(struct net_device *, const void *); ++ ++struct switchdev_deferred_item { ++ struct list_head list; ++ struct net_device *dev; ++ switchdev_deferred_func_t *func; ++ long unsigned int data[0]; ++}; ++ ++struct compress_format { ++ unsigned char magic[2]; ++ const char *name; ++ decompress_fn decompressor; ++}; ++ ++struct group_data { ++ int limit[21]; ++ int base[20]; ++ int permute[258]; ++ int minLen; ++ int maxLen; ++}; ++ ++struct bunzip_data { ++ int writeCopies; ++ int writePos; ++ int writeRunCountdown; ++ int writeCount; ++ int writeCurrent; ++ long int (*fill)(void *, long unsigned int); ++ long int inbufCount; ++ long int inbufPos; ++ unsigned char *inbuf; ++ unsigned int inbufBitCount; ++ unsigned int inbufBits; ++ unsigned int crc32Table[256]; ++ unsigned int headerCRC; ++ unsigned int totalCRC; ++ unsigned int writeCRC; ++ unsigned int *dbuf; ++ unsigned int dbufSize; ++ unsigned char selectors[32768]; ++ struct group_data groups[6]; ++ int io_error; ++ int byteCount[256]; ++ unsigned char symToByte[256]; ++ unsigned char mtfSymbol[256]; ++}; ++ ++struct rc { ++ long int (*fill)(void *, long unsigned int); ++ uint8_t *ptr; ++ uint8_t *buffer; ++ uint8_t *buffer_end; ++ long int buffer_size; ++ uint32_t code; ++ uint32_t range; ++ uint32_t bound; ++ void (*error)(char *); ++}; ++ ++struct lzma_header { ++ uint8_t pos; ++ uint32_t dict_size; ++ uint64_t dst_size; ++} __attribute__((packed)); ++ ++struct writer { ++ uint8_t *buffer; ++ uint8_t previous_byte; ++ size_t buffer_pos; ++ int bufsize; ++ size_t global_pos; ++ long int (*flush)(void *, long unsigned int); ++ struct lzma_header *header; ++}; ++ ++struct cstate { ++ int state; ++ uint32_t rep0; ++ uint32_t rep1; ++ uint32_t rep2; ++ uint32_t rep3; ++}; ++ ++struct xz_dec___2; ++ ++enum cpio_fields { ++ C_MAGIC = 0, ++ C_INO = 1, ++ C_MODE = 2, ++ C_UID = 3, ++ C_GID = 4, ++ C_NLINK = 5, ++ C_MTIME = 6, ++ C_FILESIZE = 7, ++ C_MAJ = 8, ++ C_MIN = 9, ++ C_RMAJ = 10, ++ C_RMIN = 11, ++ C_NAMESIZE = 12, ++ C_CHKSUM = 13, ++ C_NFIELDS = 14, ++}; ++ ++struct fprop_local_single { ++ long unsigned int events; ++ unsigned int period; ++ raw_spinlock_t lock; ++}; ++ ++struct pt_regs___2; ++ ++struct klist_waiter { ++ struct list_head list; ++ struct klist_node *node; ++ struct task_struct *process; ++ int woken; ++}; ++ ++struct uevent_sock { ++ struct list_head list; ++ struct sock *sk; ++}; ++ ++struct radix_tree_preload { ++ unsigned int nr; ++ struct radix_tree_node *nodes; ++}; ++ ++typedef struct { ++ long unsigned int key[2]; ++} hsiphash_key_t; ++ ++enum format_type { ++ FORMAT_TYPE_NONE = 0, ++ FORMAT_TYPE_WIDTH = 1, ++ FORMAT_TYPE_PRECISION = 2, ++ FORMAT_TYPE_CHAR = 3, ++ FORMAT_TYPE_STR = 4, ++ FORMAT_TYPE_PTR = 5, ++ FORMAT_TYPE_PERCENT_CHAR = 6, ++ FORMAT_TYPE_INVALID = 7, ++ FORMAT_TYPE_LONG_LONG = 8, ++ FORMAT_TYPE_ULONG = 9, ++ 
FORMAT_TYPE_LONG = 10, ++ FORMAT_TYPE_UBYTE = 11, ++ FORMAT_TYPE_BYTE = 12, ++ FORMAT_TYPE_USHORT = 13, ++ FORMAT_TYPE_SHORT = 14, ++ FORMAT_TYPE_UINT = 15, ++ FORMAT_TYPE_INT = 16, ++ FORMAT_TYPE_SIZE_T = 17, ++ FORMAT_TYPE_PTRDIFF = 18, ++}; ++ ++struct printf_spec { ++ unsigned int type: 8; ++ int field_width: 24; ++ unsigned int flags: 8; ++ unsigned int base: 8; ++ int precision: 16; ++}; ++ ++ ++#include "ext_def.h" ++#endif /* __VMLINUX_H__ */ +diff --git a/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_link.meta b/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_link.meta +index 170f41f..1261304 100644 +--- a/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_link.meta ++++ b/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_link.meta +@@ -2,7 +2,7 @@ version = "1.0.0" + measurements: + ( + { +- name: "nginx_statistic", ++ name: "nginx_link", + fields: + ( + { +@@ -20,6 +20,11 @@ measurements: + type: "Unknown", + name: "server_ip", + }, ++ { ++ description: "virtual port", ++ type: "Unknown", ++ name: "virtual_port", ++ }, + { + description: "server port", + type: "Unknown", +diff --git a/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_probe.c b/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_probe.c +index c9f2723..7ebb0c3 100644 +--- a/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_probe.c ++++ b/gala-gopher/src/probes/extends/ebpf.probe/src/nginxprobe/nginx_probe.c +@@ -108,7 +108,7 @@ void ngxprobe_arg_parse(char opt, char *arg, int idx) + return; + } + +-#define METRIC_STATISTIC_NAME "nginx_statistic" ++#define METRIC_STATISTIC_NAME "nginx_link" + void print_statistic_map(int fd) + { + int ret = 0; +@@ -134,11 +134,12 @@ void print_statistic_map(int fd) + } + + fprintf(stdout, +- "|%s|%s|%s|%s|%s|%u|%u|\n", ++ "|%s|%s|%s|%s|%u|%s|%u|%u|\n", + METRIC_STATISTIC_NAME, + cip_str, + ngxip_str, + nk.sip_str, ++ ntohs(d.ngx_ip.port), + (colon ? 
(colon + 1) : "0"), + nk.is_l7, + d.link_count); +diff --git a/gala-ragdoll/ragdoll/controllers/confs_controller.py b/gala-ragdoll/ragdoll/controllers/confs_controller.py +index 7569078..a865836 100644 +--- a/gala-ragdoll/ragdoll/controllers/confs_controller.py ++++ b/gala-ragdoll/ragdoll/controllers/confs_controller.py +@@ -46,6 +46,12 @@ def get_the_sync_status_of_domain(body=None): # noqa: E501 + + domain = body.domain_name + ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check the domian is exist + isExist = Format.isDomainExist(domain) + if not isExist: +@@ -240,12 +246,20 @@ def query_real_confs(body=None): # noqa: E501 + + domain = body.domain_name + hostList = body.host_ids ++ ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check the domain is Exist + isExist = Format.isDomainExist(domain) + if not isExist: + codeNum = 400 + base_rsp = BaseResponse(codeNum, "The current domain does not exist, please create the domain first.") + return base_rsp, codeNum ++ + # check whether the host is configured in the domain + isHostListExist = Format.isHostInDomain(domain) + print("isHostListExist is : {}".format(isHostListExist)) +@@ -260,6 +274,8 @@ def query_real_confs(body=None): # noqa: E501 + # If hostList is not empty, the actual contents of the currently given host are queried. + conf_tools = ConfTools() + port = conf_tools.load_port_by_conf() ++ existHost = [] ++ failedHost = [] + if len(hostList) > 0: + hostTool = HostTools() + existHost, failedHost = hostTool.getHostExistStatus(domain, hostList) +@@ -274,7 +290,6 @@ def query_real_confs(body=None): # noqa: E501 + resText = json.loads(response.text) + print("host/getHost return code is : {}".format(response.status_code)) + +- + if len(existHost) == 0 or len(failedHost) == len(hostList): + codeNum = 400 + base_rsp = BaseResponse(codeNum, "The host information is not set in the current domain." 
+ +@@ -405,6 +420,13 @@ def sync_conf_to_host_from_domain(body=None): # noqa: E501 + domain = body.domain_name + hostList = body.host_ids + ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check whether the domain exists + isExist = Format.isDomainExist(domain) + if not isExist: +diff --git a/gala-ragdoll/ragdoll/controllers/domain_controller.py b/gala-ragdoll/ragdoll/controllers/domain_controller.py +index 23cede0..ba9c6ce 100644 +--- a/gala-ragdoll/ragdoll/controllers/domain_controller.py ++++ b/gala-ragdoll/ragdoll/controllers/domain_controller.py +@@ -31,7 +31,7 @@ def create_domain(body=None): # noqa: E501 + body = [Domain.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 + + if len(body) == 0: +- base_rsp = BaseResponse(400, "The entered domian is empty") ++ base_rsp = BaseResponse(400, "The input domain cannot be empty, please check the domain.") + return base_rsp + + successDomain = [] +@@ -39,14 +39,9 @@ def create_domain(body=None): # noqa: E501 + + for domain in body: + tempDomainName = domain.domain_name +- isVerFication = Format.domainCheck(tempDomainName) +- if not isVerFication: +- codeNum = 400 +- codeString = "Interface input parameters verification failed. Please check the input parameters." +- base_rsp = BaseResponse(codeNum, codeString) +- return base_rsp, codeNum ++ checkRes = Format.domainCheck(tempDomainName) + isExist = Format.isDomainExist(tempDomainName) +- if isExist: ++ if isExist or not checkRes: + failedDomain.append(tempDomainName) + else: + successDomain.append(tempDomainName) +@@ -84,8 +79,9 @@ def delete_domain(domainName): # noqa: E501 + failedDomain = [] + + for tempDomainName in domainName: ++ checkRes = Format.domainCheck(tempDomainName) + isExist = Format.isDomainExist(tempDomainName) +- if isExist: ++ if checkRes and isExist: + domainPath = os.path.join(TARGETDIR, tempDomainName) + successDomain.append(tempDomainName) + shutil.rmtree(domainPath) +diff --git a/gala-ragdoll/ragdoll/controllers/format.py b/gala-ragdoll/ragdoll/controllers/format.py +index 8289692..c302a35 100644 +--- a/gala-ragdoll/ragdoll/controllers/format.py ++++ b/gala-ragdoll/ragdoll/controllers/format.py +@@ -14,14 +14,12 @@ class Format(object): + @staticmethod + def domainCheck(domainName): + res = True +- if domainName == "" or domainName == " " or domainName == "/" or (" " in domainName): ++ if not re.match(r"^[A-Za-z0-9_\.-]*$", domainName) or domainName == "" or len(domainName) > 255: + return False + return res + + @staticmethod + def isDomainExist(domainName): +- if domainName == "" or domainName == " " or domainName == "/" or (" " in domainName): +- return False + TARGETDIR = Format.get_git_dir() + domainPath = os.path.join(TARGETDIR, domainName) + if os.path.exists(domainPath): +diff --git a/gala-ragdoll/ragdoll/controllers/host_controller.py b/gala-ragdoll/ragdoll/controllers/host_controller.py +index 3196cbb..fe3bbed 100644 +--- a/gala-ragdoll/ragdoll/controllers/host_controller.py ++++ b/gala-ragdoll/ragdoll/controllers/host_controller.py +@@ -34,10 +34,16 @@ def add_host_in_domain(body=None): # noqa: E501 + # check whether host_infos is empty + if len(host_infos) == 0: + num = 400 +- base_rsp = BaseResponse(num, "The entered host is empty") ++ base_rsp = BaseResponse(num, "Enter host info cannot be empty, please check the host info.") + return base_rsp, num + +- # 
check whether the domain exists ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ ++ # check whether the domain exists + isExist = Format.isDomainExist(domain) + if not isExist: + num = 400 +@@ -104,6 +110,13 @@ def delete_host_in_domain(body=None): # noqa: E501 + domain = body.domain_name + hostInfos = body.host_infos + ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check whether the domain exists + isExist = Format.isDomainExist(domain) + if not isExist: +@@ -200,6 +213,13 @@ def get_host_by_domain_name(body=None): # noqa: E501 + + domain = body.domain_name + ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check whether the domain exists + isExist = Format.isDomainExist(domain) + if not isExist: +diff --git a/gala-ragdoll/ragdoll/controllers/management_controller.py b/gala-ragdoll/ragdoll/controllers/management_controller.py +index 48f3a5e..ed7e37f 100644 +--- a/gala-ragdoll/ragdoll/controllers/management_controller.py ++++ b/gala-ragdoll/ragdoll/controllers/management_controller.py +@@ -44,6 +44,13 @@ def add_management_confs_in_domain(body=None): # noqa: E501 + domain = body.domain_name + conf_files = body.conf_files + ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + # check whether the domain exists + isExist = Format.isDomainExist(domain) + if not isExist: +@@ -70,7 +77,8 @@ def add_management_confs_in_domain(body=None): # noqa: E501 + contents_list_null.append(d_conf) + else: + codeNum = 400 +- base_rsp = BaseResponse(codeNum, "Invalid input exists.") ++ base_rsp = BaseResponse(codeNum, "The input parameters are not compliant, " + ++ "please check the input parameters.") + return base_rsp, codeNum + + successConf = [] +@@ -81,7 +89,17 @@ def add_management_confs_in_domain(body=None): # noqa: E501 + # Content is not an empty scene and is directly analyed and parsed + if len(contents_list_non_null) > 0: + for d_conf in contents_list_non_null: ++ if not d_conf.contents.strip(): ++ codeNum = 400 ++ base_rsp = BaseResponse(codeNum, "The input parameters are not compliant, " + ++ "please check the input parameters.") ++ return base_rsp, codeNum + content_string = object_parse.parse_content_to_json(d_conf.file_path, d_conf.contents) ++ if not json.loads(content_string): ++ codeNum = 400 ++ base_rsp = BaseResponse(codeNum, "Input configuration content verification failed, " + ++ "please check the config.") ++ return base_rsp, codeNum + # create the file and expected value in domain + feature_path = yang_module.get_feature_by_real_path(domain, d_conf.file_path) + result = conf_tools.wirteFileInPath(feature_path, content_string + '\n') +@@ -144,6 +162,11 @@ def add_management_confs_in_domain(body=None): # noqa: E501 + content = d_file.get("content") + content_string = object_parse.parse_content_to_json(file_path, content) + # create the file and expected value in domain 
++ if not json.loads(content_string): ++ codeNum = 400 ++ base_rsp = BaseResponse(codeNum, "Input configuration content verification failed," + ++ "please check the config in the host.") ++ return base_rsp, codeNum + feature_path = yang_module.get_feature_by_real_path(domain, file_path) + result = conf_tools.wirteFileInPath(feature_path, content_string) + if result: +@@ -194,7 +217,14 @@ def delete_management_confs_in_domain(body=None): # noqa: E501 + + # check whether the domain exists + domain = body.domain_name +- print("body is : {}".format(body)) ++ ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + isExist = Format.isDomainExist(domain) + if not isExist: + codeNum = 400 +@@ -279,9 +309,17 @@ def get_management_confs_in_domain(body=None): # noqa: E501 + """ + if connexion.request.is_json: + body = DomainName.from_dict(connexion.request.get_json()) # noqa: E501 +- ++ + # Check whether the domain exists + domain = body.domain_name ++ ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + isExist = Format.isDomainExist(domain) + if not isExist: + base_rsp = BaseResponse(400, "The current domain does not exist") +@@ -293,7 +331,6 @@ def get_management_confs_in_domain(body=None): # noqa: E501 + + # get the path in domain + domainPath = os.path.join(TARGETDIR, domain) +- print("########## domainPath is : {} ########## ".format(domainPath)) + + # When there is a file path is the path of judgment for the configuration items + for root, dirs, files in os.walk(domainPath): +@@ -338,6 +375,14 @@ def query_changelog_of_management_confs_in_domain(body=None): # noqa: E501 + # check whether the domain exists + domain = body.domain_name + print("body is : {}".format(body)) ++ ++ # check the input domain ++ checkRes = Format.domainCheck(domain) ++ if not checkRes: ++ num = 400 ++ base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") ++ return base_rsp, num ++ + isExist = Format.isDomainExist(domain) + if not isExist: + base_rsp = BaseResponse(400, "The current domain does not exist") +diff --git a/gala-ragdoll/ragdoll/parses/ini_parse.py b/gala-ragdoll/ragdoll/parses/ini_parse.py +index 4c92030..6163a47 100644 +--- a/gala-ragdoll/ragdoll/parses/ini_parse.py ++++ b/gala-ragdoll/ragdoll/parses/ini_parse.py +@@ -29,6 +29,8 @@ class IniJsonParse(object): + desc: return an json listfrom the obj object + """ + dic = {} ++ if not obj: ++ return dic + sections = obj.sections() + for d_sec in sections: + d_dic = {} +diff --git a/gala-ragdoll/ragdoll/test/test_collect.py b/gala-ragdoll/ragdoll/test/test_collect.py +new file mode 100644 +index 0000000..cde68a2 +--- /dev/null ++++ b/gala-ragdoll/ragdoll/test/test_collect.py +@@ -0,0 +1,11 @@ ++import requests ++import json ++ ++data = {'infos': [{'host_id': '6777c1740fca11ec979a525400056d9d', 'config_list': ['/etc/coremail/coremail.conf']}]} ++url = 'http://127.0.0.1:11111/manage/config/collect' ++ ++ ++response = requests.request('post', url, json=data) ++print(response.text) ++print(type(response.text)) ++ +diff --git a/gala-spider/README.md b/gala-spider/README.md +index 397f631..c59a3b8 100644 +--- a/gala-spider/README.md ++++ 
b/gala-spider/README.md +@@ -65,7 +65,11 @@ gala-spider用于架构感知探测结果呈现,可以根据各个节点上报 + + ### 运行架构 + +-![topo_logic](D:\code\A-Ops\gala-spider\doc\pic\topo_logic.png) ++![topo_logic](doc/pic/topo_logic.png) ++ ++### 接口文档 ++ ++[Restful API](doc/swagger.yaml) + + ## 详细介绍 + +diff --git a/gala-spider/config/gala-spider.conf b/gala-spider/config/gala-spider.conf +index 0e9ec01..13ce1de 100644 +--- a/gala-spider/config/gala-spider.conf ++++ b/gala-spider/config/gala-spider.conf +@@ -11,7 +11,7 @@ broker = + + [table_info] + base_table_name = ["tcp_link", "lvs_link"] +-other_table_name = ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] ++other_table_name = ["nginx_link" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] + + [option] + exclude_addr = ["1.2.3.4"] +@@ -26,4 +26,4 @@ temp_tcp_file = "/var/tmp/spider/tcpline.txt" + temp_other_file = "/var/tmp/spider/otherline.txt" + + [spider] +-port = 11115 +\ No newline at end of file ++port = 11115 +diff --git a/gala-spider/doc/conf_introduction.md b/gala-spider/doc/conf_introduction.md +index dd28c0e..3ef9ad2 100644 +--- a/gala-spider/doc/conf_introduction.md ++++ b/gala-spider/doc/conf_introduction.md +@@ -47,7 +47,7 @@ password = xxxx + ``` + [table_info] -- 可支持的技术点 + base_table_name = ["tcp_link", "lvs_link"] +-other_table_name = ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] ++other_table_name = ["nginx_link" , "lvs_link" , "haproxy_link" , "dnsmasq_link"] + ``` + + ### option +@@ -55,4 +55,4 @@ other_table_name = ["nginx_statistic" , "lvs_link" , "haproxy_link" , "dnsmasq_l + ``` + [option] -- 一些其他的配置项 + exclude_addr = ["1.2.3.4"] -- 例外处理配置项,默认为1.2.3.4即不做例外处理;设置后可以对某类IP不进行算法处理 +-``` +\ No newline at end of file ++``` +diff --git a/gala-spider/doc/swagger.yaml b/gala-spider/doc/swagger.yaml +new file mode 100644 +index 0000000..30e841f +--- /dev/null ++++ b/gala-spider/doc/swagger.yaml +@@ -0,0 +1,353 @@ ++--- ++swagger: "2.0" ++info: ++ description: "Topo Graph Engine Service" ++ version: "0.0.1" ++ title: "Topo Graph Engine Service" ++ contact: ++ email: "zhengxian@huawei.com" ++host: "0.0.0.0:11115" ++basePath: "/gala-spider/api/v1" ++tags: ++- name: "gala-spider" ++ description: "Topo Graph Engine Service" ++schemes: ++- "http" ++paths: ++ /get_status: ++ get: ++ tags: ++ - "gala-spider" ++ summary: "get Topo Graph Engine Service health status" ++ description: "get Topo Graph Engine Service health status" ++ operationId: "get_topo_graph_status" ++ consumes: ++ - "application/json" ++ produces: ++ - "application/json" ++ parameters: [] ++ responses: ++ "200": ++ description: "successful operation" ++ schema: ++ $ref: "#/definitions/BaseResponse" ++ x-swagger-router-controller: "spider.controllers.gala_spider" ++ /get_entities: ++ get: ++ tags: ++ - "gala-spider" ++ summary: "get observed entity list" ++ description: "get observed entity list" ++ operationId: "get_observed_entity_list" ++ consumes: ++ - "application/json" ++ produces: ++ - "application/json" ++ parameters: ++ - name: "timestamp" ++ in: "query" ++ description: "the time that cared" ++ required: false ++ type: "integer" ++ format: "int64" ++ responses: ++ "200": ++ description: "successful operation" ++ schema: ++ $ref: "#/definitions/EntitiesResponse" ++ x-swagger-router-controller: "spider.controllers.gala_spider" ++definitions: ++ EntitiesResponse: ++ type: "object" ++ properties: ++ code: ++ type: "integer" ++ format: "int32" ++ msg: ++ type: "string" ++ timestamp: ++ type: "integer" ++ format: "int64" ++ entityids: ++ type: "array" ++ items: ++ 
type: "string" ++ entities: ++ type: "array" ++ items: ++ $ref: "#/definitions/Entity" ++ example: ++ msg: "msg" ++ code: 0 ++ entities: ++ - dependingitems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ dependeditems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ name: "name" ++ entityid: "entityid" ++ type: "PROCESS" ++ attrs: ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ - dependingitems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ dependeditems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ name: "name" ++ entityid: "entityid" ++ type: "PROCESS" ++ attrs: ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ entityids: ++ - "entityids" ++ - "entityids" ++ timestamp: 6 ++ Entity: ++ type: "object" ++ properties: ++ entityid: ++ type: "string" ++ type: ++ type: "string" ++ enum: ++ - "PROCESS" ++ - "CONTAINER" ++ - "POD" ++ - "VM" ++ - "BM" ++ - "TCP-LINK" ++ - "LVS-LINK" ++ - "NGNIX-LINK" ++ name: ++ type: "string" ++ dependingitems: ++ type: "array" ++ items: ++ $ref: "#/definitions/Dependenceitem" ++ dependeditems: ++ type: "array" ++ items: ++ $ref: "#/definitions/Dependenceitem" ++ attrs: ++ type: "array" ++ items: ++ $ref: "#/definitions/Attr" ++ example: ++ dependingitems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ dependeditems: ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ - calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: "PROCESS" ++ name: "name" ++ entityid: "entityid" ++ type: "PROCESS" ++ attrs: ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ - vtype: "int" ++ value: "value" ++ key: "key" ++ Dependenceitem: ++ type: "object" ++ properties: ++ calls: ++ type: "array" ++ items: ++ $ref: "#/definitions/Call" ++ runOns: ++ type: "array" ++ items: ++ $ref: "#/definitions/Runon" ++ example: ++ calls: ++ - id: "id" ++ type: "TCP-LINK" ++ - id: "id" ++ type: "TCP-LINK" ++ runOns: ++ - id: "id" ++ type: "PROCESS" ++ - id: "id" ++ type: 
"PROCESS" ++ Call: ++ type: "object" ++ properties: ++ type: ++ type: "string" ++ enum: ++ - "TCP-LINK" ++ - "LVS-LINK" ++ - "NGNIX-LINK" ++ id: ++ type: "string" ++ example: ++ id: "id" ++ type: "TCP-LINK" ++ Runon: ++ type: "object" ++ properties: ++ type: ++ type: "string" ++ enum: ++ - "PROCESS" ++ - "CONTAINER" ++ - "POD" ++ - "VM" ++ - "BM" ++ id: ++ type: "string" ++ example: ++ id: "id" ++ type: "PROCESS" ++ Attr: ++ type: "object" ++ properties: ++ key: ++ type: "string" ++ value: ++ type: "string" ++ vtype: ++ type: "string" ++ enum: ++ - "int" ++ - "string" ++ - "float" ++ example: ++ vtype: "int" ++ value: "value" ++ key: "key" ++ BaseResponse: ++ type: "object" ++ properties: ++ code: ++ type: "integer" ++ format: "int32" ++ msg: ++ type: "string" ++ example: ++ msg: "msg" ++ code: 0 +diff --git a/gala-spider/spider/controllers/gala_spider.py b/gala-spider/spider/controllers/gala_spider.py +index f10cbb8..ae147cd 100644 +--- a/gala-spider/spider/controllers/gala_spider.py ++++ b/gala-spider/spider/controllers/gala_spider.py +@@ -24,7 +24,9 @@ def get_observed_entity_list(timestamp=None): # noqa: E501 + """ + entities = [] + # obtain tcp_link entities +- edges_table, edges_infos, nodes_table, lb_tables = node_entity_process() ++ edges_table, edges_infos, nodes_table, lb_tables, vm_tables = node_entity_process() ++ if edges_table is None: ++ return 500 + for key in edges_table.keys(): + if len(edges_table[key]) == 5: + edge_attrs = [] +@@ -79,25 +81,49 @@ def get_observed_entity_list(timestamp=None): # noqa: E501 + dependingitems = Dependenceitem(calls = right_calls, run_ons = on_runon), + attrs = node_attrs) + entities.append(entity) +- for key in lb_tables.keys(): +- lb_attrs = [] +- left_call = Call(type = "PROCESS", +- id = lb_tables[key]['src']) +- right_call = Call(type = "PROCESS", +- id = lb_tables[key]['dst']) +- run_on = Runon(type = "PROCESS", +- id = lb_tables[key]['on']) +- lb_attrs.append(Attr(key='example', value = "0.1", vtype = "float")) +- entity = Entity(entityid = lb_tables[key]['lb_id'], +- type = "NGINX-LINK", +- name = lb_tables[key]['lb_id'], +- dependeditems = Dependenceitem(calls = left_call), +- dependingitems = Dependenceitem(calls = right_call, run_ons = run_on)) ++ if lb_tables is not None: ++ for key in lb_tables.keys(): ++ if len(lb_tables[key]) < 4: ++ continue ++ lb_attrs = [] ++ left_call = Call(type = "PROCESS", ++ id = lb_tables[key]['src']) ++ right_call = Call(type = "PROCESS", ++ id = lb_tables[key]['dst']) ++ run_on = Runon(type = "PROCESS", ++ id = lb_tables[key]['on']) ++ lb_attrs.append(Attr(key='example', value = "0.1", vtype = "float")) ++ entity = Entity(entityid = lb_tables[key]['lb_id'], ++ type = lb_tables[key]['tname'].upper(), ++ name = lb_tables[key]['lb_id'], ++ dependeditems = Dependenceitem(calls = left_call), ++ dependingitems = Dependenceitem(calls = right_call, run_ons = run_on)) ++ entities.append(entity) ++ for key in vm_tables.keys(): ++ procs = [] ++ for i in range(len(vm_tables[key]['proc'])): ++ val = vm_tables[key]['proc'].pop() ++ proc = Runon(type = "PROCESS", ++ id = val) ++ procs.append(proc) ++ entity = Entity(entityid = key, ++ type = "VM", ++ name = key, ++ dependeditems = Dependenceitem(run_ons = procs), ++ dependingitems = Dependenceitem()) + entities.append(entity) +- entities_res = EntitiesResponse(code = 200, +- msg = "Successful", ++ ++ if len(entities) == 0: ++ code = 500 ++ msg = "Empty" ++ else: ++ code = 200 ++ msg = "Successful" ++ entities_res = EntitiesResponse(code = code, ++ msg = msg, + 
timestamp = 12345678, + entities = entities) ++ clear_tmp() + return entities_res, 200 + + +diff --git a/gala-spider/spider/data_process/data_to_entity.py b/gala-spider/spider/data_process/data_to_entity.py +index ecfd815..eb4ccf2 100644 +--- a/gala-spider/spider/data_process/data_to_entity.py ++++ b/gala-spider/spider/data_process/data_to_entity.py +@@ -17,7 +17,7 @@ def tcp_entity_process(): + f = open(ast.literal_eval(temp_tcp_file)) + else: + print("/var/tmp/spider/tcpline.txt not here.") +- sys.exit() ++ return None, None + lines = f.readline() + while lines: + # obtain label = hostname + process_name +@@ -80,7 +80,7 @@ def lb_entity_process(): + f = open(ast.literal_eval(temp_other_file)) + else: + print("/var/tmp/spider/otherline.txt not here.") +- sys.exit() ++ return None + lines = f.readline() + while lines: + line_json = json.loads(lines) +@@ -94,21 +94,20 @@ def lb_entity_process(): + v_ip = line_json.get("virtual_ip") + s_ip = line_json.get("server_ip") + s_port = line_json.get("server_port") +- if table_name == "nginx_statistic": +- process_name = "nginx" +- lb_tables.setdefault((hostname, process_name), {}).setdefault("c-v", (c_ip, v_ip, s_port)) +- lb_tables.setdefault((hostname, process_name), {}).setdefault("v-s", (v_ip, s_ip, s_port)) +- elif table_name == "haproxy_link": +- process_name = "haproxy" +- lb_tables.setdefault((hostname, process_name), {}).setdefault("c-v", (c_ip, v_ip, s_port)) +- lb_tables.setdefault((hostname, process_name), {}).setdefault("v-s", (v_ip, s_ip, s_port)) ++ v_port = line_json.get("virtual_port") ++ lb_tables.setdefault((c_ip, v_ip, s_ip, v_port, s_port), {}).setdefault("hname", hostname) ++ lb_tables.setdefault((c_ip, v_ip, s_ip, v_port, s_port), {}).setdefault("tname", table_name) + lines = f.readline() + return lb_tables + + + def node_entity_process(): + nodes_table = {} ++ vm_table = {} + edges_table, edges_infos = tcp_entity_process() ++ if edges_table is None: ++ print("Please wait kafka consumer datas...") ++ return None, None, None, None + lb_tables = lb_entity_process() + for key in edges_table.keys(): + if len(edges_table[key]) == 2: +@@ -126,37 +125,38 @@ def node_entity_process(): + nodes_table.setdefault(dst_node_id, {}).setdefault('host', edges_table[key]['0']['h']) + nodes_table.setdefault(dst_node_id, {}).setdefault('l_edge', []) + nodes_table[dst_node_id].get('l_edge').append((edge_id, "TCP_LINK")) +- for lb_key in lb_tables.keys(): +- if lb_tables[lb_key]['c-v'][0] == key[0] and \ +- lb_tables[lb_key]['c-v'][1] == key[1] and \ +- lb_tables[lb_key]['c-v'][2] == key[2]: +- lb_tables.setdefault(lb_key, {}).setdefault('src', src_node_id) +- if lb_tables[lb_key]['v-s'][0] == key[0] and \ +- lb_tables[lb_key]['v-s'][1] == key[1] and \ +- lb_tables[lb_key]['v-s'][2] == key[2]: +- lb_tables.setdefault(lb_key, {}).setdefault('dst', dst_node_id) ++ if lb_tables is not None: ++ for lb_key in lb_tables.keys(): ++ if lb_key[0] == key[0] and lb_key[1] == key[1] and lb_key[3] == key[2]: ++ lb_tables.setdefault(lb_key, {}).setdefault('src', src_node_id) ++ if lb_key[1] == key[0] and lb_key[2] == key[1] and lb_key[4] == key[2]: ++ lb_tables.setdefault(lb_key, {}).setdefault('dst', dst_node_id) + +- for key in lb_tables.keys(): +- print("lb----", key, lb_tables[key]) +- lb_node_id = node_entity_name(key[0], key[1], None) +- lb_tables.setdefault(key, {}).setdefault('on', lb_node_id) +- if key[1] == "dnsmasq": +- type = "DNSMASQ-LINK" +- elif key[1] == "nginx": +- type = "NGINX-LINK" +- lb_id = edge_entity_name("nginx_link", None, 
lb_tables[key]['dst'], None, lb_tables[key]['src']) +- lb_tables.setdefault(key, {}).setdefault("lb_id", lb_id) +- elif key[1] == "haproxy": +- lb_id = edge_entity_name("haproxy_link", None, lb_tables[key]['dst'], None, lb_tables[key]['src']) +- lb_tables.setdefault(key, {}).setdefault("lb_id", lb_id) +- +- nodes_table.setdefault(lb_node_id, {}).setdefault('lb_edge', []) +- nodes_table[lb_node_id].get('lb_edge').append((lb_tables[key]['lb_id'], type)) ++ if lb_tables is not None: ++ for key in lb_tables.keys(): ++ print("lb----", key, lb_tables[key]) ++ lb_node_id = node_entity_name(lb_tables[key]['hname'], lb_tables[key]['tname'].split("_")[0], None) ++ lb_tables.setdefault(key, {}).setdefault('on', lb_node_id) ++ if key[1] == "dnsmasq_link": ++ type = key[1].upper() ++ # Add process code here.... ++ else: ++ if lb_tables[key]['dst'] is not None and lb_tables[key]['src'] is not None: ++ lb_id = edge_entity_name(lb_tables[key]['hname'], None, lb_tables[key]['dst'], None, lb_tables[key]['src']) ++ lb_tables.setdefault(key, {}).setdefault("lb_id", lb_id) ++ nodes_table.setdefault(lb_node_id, {}).setdefault('lb_edge', []) ++ nodes_table[lb_node_id].get('lb_edge').append((lb_tables[key]['lb_id'], lb_tables[key]['tname'].upper())) + + for key in nodes_table.keys(): + print("node----", key, nodes_table[key]) ++ host = nodes_table[key]['host'] ++ vm_table.setdefault(host, {}).setdefault('proc', []) ++ vm_table[host].get('proc').append(key) ++ ++ for key in vm_table.keys(): ++ print("vm-----", key, vm_table[key]) + +- return edges_table, edges_infos, nodes_table, lb_tables ++ return edges_table, edges_infos, nodes_table, lb_tables, vm_table + + + def clear_tmp(): +diff --git a/gala-spider/spider/db_agent/db_process.py b/gala-spider/spider/db_agent/db_process.py +index 12a31bd..739bc8e 100644 +--- a/gala-spider/spider/db_agent/db_process.py ++++ b/gala-spider/spider/db_agent/db_process.py +@@ -33,7 +33,7 @@ def db_kafka_agent(): + with open(ast.literal_eval(temp_tcp_file), 'a+') as d_file: + d_file.write(lines) + d_file.write('\n') +- print(lines) ++ #print(lines) + if line_json.get("table_name") in ast.literal_eval(other_table): + with open(ast.literal_eval(temp_other_file), 'a+') as o_file: + o_file.write(lines) +-- +2.27.0 + diff --git a/A-Ops.spec b/A-Ops.spec index 08e0323..e004c92 100644 --- a/A-Ops.spec +++ b/A-Ops.spec @@ -7,6 +7,7 @@ URL: https://gitee.com/openeuler/A-Ops Source0: %{name}-%{version}.tar.gz Source1: A-Ops-web-node-modules.tar.gz patch0001: 0001-modify-deploymanager-ansible-playbook.patch +patch0002: 0002-fix-issue-for-the-4th-test.patch # build for gopher BuildRequires: cmake gcc-c++ yum elfutils-devel clang >= 10.0.1 llvm libconfig-devel @@ -487,7 +488,7 @@ fi %changelog * Mon Sep 13 2021 chemingdao - v1.0.2-3 -- modify spec for aops-web build +- modify spec for aops-web build and fix some issues. * Tue Sep 11 2021 yangyunyi - v1.0.2-2 - modify ansible playbook
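
For quick manual verification of the gala-spider interface added by doc/swagger.yaml in this patch, a small request script in the same style as the patch's ragdoll/test/test_collect.py can be used. This is an illustrative sketch only, not part of the patch: the port (11115), base path (/gala-spider/api/v1) and the optional timestamp query parameter are taken from the swagger definition above, while the localhost address is an assumption about where the service runs.

# Illustrative sketch (not part of the patch): exercise the two gala-spider
# endpoints declared in doc/swagger.yaml. Port and base path come from that
# file; 127.0.0.1 is assumed as the service address.
import requests

BASE_URL = "http://127.0.0.1:11115/gala-spider/api/v1"

# /get_status returns a BaseResponse object ({"code": ..., "msg": ...})
status = requests.get(BASE_URL + "/get_status")
print(status.status_code, status.text)

# /get_entities accepts an optional integer "timestamp" query parameter and
# returns an EntitiesResponse carrying the observed entity list
entities = requests.get(BASE_URL + "/get_entities", params={"timestamp": 12345678})
print(entities.status_code, entities.text)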