!184 [sync] PR-173: bind:CVE-2023-2911 CVE-2023-2828

From: @openeuler-sync-bot 
Reviewed-by: @sunsuwan 
Signed-off-by: @sunsuwan
openeuler-ci-bot 2023-09-04 13:06:01 +00:00 committed by Gitee
commit d013c0cd65
3 changed files with 287 additions and 1 deletion

backport-CVE-2023-2828.patch

@@ -0,0 +1,170 @@
From 321a498e73e9fa00d69ee9cfa4756505e5e5c8e8 Mon Sep 17 00:00:00 2001
From: Evan Hunt <each@isc.org>
Date: Thu, 25 May 2023 23:53:50 -0700
Subject: [PATCH] CVE-2023-2828

Every `named` instance configured to run as a recursive resolver maintains
a cache database holding the responses to the queries it has recently sent
to authoritative servers. The size limit for that cache database can be
configured using the `max-cache-size` statement in the configuration file;
it defaults to 90% of the total amount of memory available on the host.
When the size of the cache reaches 7/8 of the configured limit, a
cache-cleaning algorithm starts to remove expired and/or least-recently
used RRsets from the cache, to keep memory use below the configured limit.

It has been discovered that the effectiveness of the cache-cleaning
algorithm used in `named` can be severely diminished by querying the
resolver for specific RRsets in a certain order, effectively allowing the
configured `max-cache-size` limit to be significantly exceeded.
---
lib/dns/rbtdb.c | 105 ++++++++++++++++++++++++++++++------------------
1 file changed, 65 insertions(+), 40 deletions(-)
diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c
index 1795f9d..c25023c 100644
--- a/lib/dns/rbtdb.c
+++ b/lib/dns/rbtdb.c
@@ -599,7 +599,7 @@ static void
expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header, bool tree_locked,
expire_t reason);
static void
-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, isc_stdtime_t now,
+overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
bool tree_locked);
static isc_result_t
resign_insert(dns_rbtdb_t *rbtdb, int idx, rdatasetheader_t *newheader);
@@ -6794,6 +6794,16 @@ cleanup:
static dns_dbmethods_t zone_methods;
+static size_t
+rdataset_size(rdatasetheader_t *header) {
+ if (!NONEXISTENT(header)) {
+ return (dns_rdataslab_size((unsigned char *)header,
+ sizeof(*header)));
+ }
+
+ return (sizeof(*header));
+}
+
static isc_result_t
addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
@@ -6957,7 +6967,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
}
if (cache_is_overmem) {
- overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked);
+ overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader),
+ tree_locked);
}
NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock,
@@ -6976,10 +6987,18 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
}
header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
- if (header != NULL && header->rdh_ttl + rbtdb->serve_stale_ttl <
- now - RBTDB_VIRTUAL)
- {
- expire_header(rbtdb, header, tree_locked, expire_ttl);
+ if (header != NULL) {
+ dns_ttl_t rdh_ttl = header->rdh_ttl;
+
+ /* Only account for stale TTL if cache is not overmem */
+ if (!cache_is_overmem) {
+ rdh_ttl += rbtdb->serve_stale_ttl;
+ }
+
+ if (rdh_ttl < now - RBTDB_VIRTUAL) {
+ expire_header(rbtdb, header, tree_locked,
+ expire_ttl);
+ }
}
/*
@@ -10495,52 +10514,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header, isc_stdtime_t now) {
ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link);
}
+static size_t
+expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize,
+ bool tree_locked) {
+ rdatasetheader_t *header, *header_prev;
+ size_t purged = 0;
+
+ for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
+ header != NULL && purged <= purgesize; header = header_prev)
+ {
+ header_prev = ISC_LIST_PREV(header, link);
+ /*
+ * Unlink the entry at this point to avoid checking it
+ * again even if it's currently used someone else and
+ * cannot be purged at this moment. This entry won't be
+ * referenced any more (so unlinking is safe) since the
+ * TTL was reset to 0.
+ */
+ ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link);
+ size_t header_size = rdataset_size(header);
+ expire_header(rbtdb, header, tree_locked, expire_lru);
+ purged += header_size;
+ }
+
+ return (purged);
+}
+
/*%
- * Purge some expired and/or stale (i.e. unused for some period) cache entries
- * under an overmem condition. To recover from this condition quickly, up to
- * 2 entries will be purged. This process is triggered while adding a new
- * entry, and we specifically avoid purging entries in the same LRU bucket as
- * the one to which the new entry will belong. Otherwise, we might purge
- * entries of the same name of different RR types while adding RRsets from a
- * single response (consider the case where we're adding A and AAAA glue records
- * of the same NS name).
+ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache
+ * entries under the overmem condition. To recover from this condition quickly,
+ * we cleanup entries up to the size of newly added rdata (passed as purgesize).
+ *
+ * This process is triggered while adding a new entry, and we specifically avoid
+ * purging entries in the same LRU bucket as the one to which the new entry will
+ * belong. Otherwise, we might purge entries of the same name of different RR
+ * types while adding RRsets from a single response (consider the case where
+ * we're adding A and AAAA glue records of the same NS name).
*/
static void
-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, isc_stdtime_t now,
+overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
bool tree_locked) {
- rdatasetheader_t *header, *header_prev;
unsigned int locknum;
- int purgecount = 2;
+ size_t purged = 0;
for (locknum = (locknum_start + 1) % rbtdb->node_lock_count;
- locknum != locknum_start && purgecount > 0;
+ locknum != locknum_start && purged <= purgesize;
locknum = (locknum + 1) % rbtdb->node_lock_count)
{
NODE_LOCK(&rbtdb->node_locks[locknum].lock,
isc_rwlocktype_write);
- header = isc_heap_element(rbtdb->heaps[locknum], 1);
- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) {
- expire_header(rbtdb, header, tree_locked, expire_ttl);
- purgecount--;
- }
-
- for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
- header != NULL && purgecount > 0; header = header_prev)
- {
- header_prev = ISC_LIST_PREV(header, link);
- /*
- * Unlink the entry at this point to avoid checking it
- * again even if it's currently used someone else and
- * cannot be purged at this moment. This entry won't be
- * referenced any more (so unlinking is safe) since the
- * TTL was reset to 0.
- */
- ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header,
- link);
- expire_header(rbtdb, header, tree_locked, expire_lru);
- purgecount--;
- }
+ purged += expire_lru_headers(rbtdb, locknum, purgesize - purged,
+ tree_locked);
NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
isc_rwlocktype_write);
--
2.33.0
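
The essence of the fix above: overmem_purge() no longer evicts a fixed
number of entries (the old purgecount = 2) but keeps evicting from the tail
of each LRU bucket until at least as many bytes as the incoming rdataset
occupies have been reclaimed, so cache growth can no longer outpace
cleaning. The following is a minimal, self-contained C sketch of that
size-based eviction loop; entry_t, lru_t, lru_unlink(), and
overmem_purge_sketch() are simplified hypothetical stand-ins for BIND's
rdatasetheader_t, its per-bucket LRU lists, and overmem_purge(), not the
real types or API.

#include <stddef.h>

typedef struct entry {
	struct entry *prev, *next;
	size_t size; /* bytes accounted to this cache entry */
} entry_t;

typedef struct {
	entry_t *head, *tail; /* head = most recently used, tail = oldest */
} lru_t;

/* Detach an entry from the doubly linked LRU list. */
static void
lru_unlink(lru_t *lru, entry_t *e) {
	if (e->prev != NULL) {
		e->prev->next = e->next;
	} else {
		lru->head = e->next;
	}
	if (e->next != NULL) {
		e->next->prev = e->prev;
	} else {
		lru->tail = e->prev;
	}
	e->prev = e->next = NULL;
}

/*
 * Evict from the oldest end until at least purgesize bytes have been
 * reclaimed (mirroring expire_lru_headers() above); the real code calls
 * expire_header() where this sketch only unlinks.
 */
static size_t
overmem_purge_sketch(lru_t *lru, size_t purgesize) {
	size_t purged = 0;
	entry_t *e, *prev;

	for (e = lru->tail; e != NULL && purged <= purgesize; e = prev) {
		prev = e->prev;
		lru_unlink(lru, e);
		purged += e->size;
	}
	return (purged);
}

As in the patch, the loop condition is purged <= purgesize, so slightly
more than the requested amount may be reclaimed; this keeps the cache
trending below the limit rather than hovering at it.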

backport-CVE-2023-2911.patch

@@ -0,0 +1,107 @@
From 240caa32b9cab90a38ab863fd64e6becf5d1393c Mon Sep 17 00:00:00 2001
From: Evan Hunt <each@isc.org>
Date: Thu, 25 May 2023 23:53:50 -0700
Subject: [PATCH] Stale answer lookups could loop when over recursion quota

When a query was aborted because of the recursion quota being exceeded,
but triggered a stale answer response and a stale data refresh query,
it could cause named to loop back where we are iterating and following
a delegation. Having no good answer in cache, we would fall back to
using serve-stale again, use the stale data, try to refresh the RRset,
and loop back again, without ever terminating until crashing due to
stack overflow.

This happens because in the functions 'query_notfound()' and
'query_delegation_recurse()', we check whether we can fall back to
serving stale data. We shouldn't do so if we are already refreshing
an RRset due to having prioritized stale data in cache.

In other words, we need to add an extra check to 'query_usestale()' to
disallow serving stale data if we are currently refreshing a stale
RRset.

As an additional mitigation to prevent looping, we now use the result
code ISC_R_ALREADYRUNNING rather than ISC_R_FAILURE when a recursion
loop is encountered, and we check for that condition in
'query_usestale()' as well.
---
lib/ns/query.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/lib/ns/query.c b/lib/ns/query.c
index 1444de5..3ce6742 100644
--- a/lib/ns/query.c
+++ b/lib/ns/query.c
@@ -5696,6 +5696,7 @@ query_refresh_rrset(query_ctx_t *orig_qctx) {
qctx.client->query.dboptions &= ~(DNS_DBFIND_STALETIMEOUT |
DNS_DBFIND_STALEOK |
DNS_DBFIND_STALEENABLED);
+ qctx.client->nodetach = false;
/*
* We'll need some resources...
@@ -5920,7 +5921,14 @@ query_lookup(query_ctx_t *qctx) {
"%s stale answer used, an attempt to "
"refresh the RRset will still be made",
namebuf);
+
qctx->refresh_rrset = STALE(qctx->rdataset);
+
+ /*
+ * If we are refreshing the RRSet, we must not
+ * detach from the client in query_send().
+ */
+ qctx->client->nodetach = qctx->refresh_rrset;
}
} else {
/*
@@ -6272,7 +6280,7 @@ ns_query_recurse(ns_client_t *client, dns_rdatatype_t qtype, dns_name_t *qname,
if (recparam_match(&client->query.recparam, qtype, qname, qdomain)) {
ns_client_log(client, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_QUERY,
ISC_LOG_INFO, "recursion loop detected");
- return (ISC_R_FAILURE);
+ return (ISC_R_ALREADYRUNNING);
}
recparam_update(&client->query.recparam, qtype, qname, qdomain);
@@ -7235,10 +7243,21 @@ query_usestale(query_ctx_t *qctx, isc_result_t result) {
return (false);
}
- if (result == DNS_R_DUPLICATE || result == DNS_R_DROP) {
+ if (qctx->refresh_rrset) {
+ /*
+ * This is a refreshing query, we have already prioritized
+ * stale data, so don't enable serve-stale again.
+ */
+ return (false);
+ }
+
+ if (result == DNS_R_DUPLICATE || result == DNS_R_DROP ||
+ result == ISC_R_ALREADYRUNNING)
+ {
/*
* Don't enable serve-stale if the result signals a duplicate
- * query or query that is being dropped.
+ * query or a query that is being dropped or can't proceed
+ * because of a recursion loop.
*/
return (false);
}
@@ -11490,12 +11509,7 @@ ns_query_done(query_ctx_t *qctx) {
/*
* Client may have been detached after query_send(), so
* we test and store the flag state here, for safety.
- * If we are refreshing the RRSet, we must not detach from the client
- * in the query_send(), so we need to override the flag.
*/
- if (qctx->refresh_rrset) {
- qctx->client->nodetach = true;
- }
nodetach = qctx->client->nodetach;
query_send(qctx->client);
--
2.33.0
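
The two guards added to query_usestale() above are what break the
recursion: a refreshing query must never re-enter serve-stale, and a
detected recursion loop (now reported as ISC_R_ALREADYRUNNING instead of
the generic ISC_R_FAILURE) is treated like a duplicate or dropped query.
Below is a compact C sketch of that decision logic; result_t, qctx_t, and
query_usestale_sketch() are hypothetical reduced stand-ins for
isc_result_t, query_ctx_t, and the real query_usestale(), not BIND's
actual declarations.

#include <stdbool.h>

typedef enum {
	R_SUCCESS,
	R_DUPLICATE,     /* duplicate query (DNS_R_DUPLICATE) */
	R_DROP,          /* query being dropped (DNS_R_DROP) */
	R_ALREADYRUNNING /* recursion loop detected (ISC_R_ALREADYRUNNING) */
} result_t;

typedef struct {
	bool refresh_rrset; /* already refreshing prioritized stale data */
} qctx_t;

/* Decide whether this lookup may fall back to serve-stale. */
static bool
query_usestale_sketch(const qctx_t *qctx, result_t result) {
	if (qctx->refresh_rrset) {
		/*
		 * A refresh query has already served stale data once;
		 * enabling serve-stale again would loop until the stack
		 * overflows.
		 */
		return (false);
	}
	if (result == R_DUPLICATE || result == R_DROP ||
	    result == R_ALREADYRUNNING)
	{
		/* Duplicate, dropped, or looping queries stay fresh-only. */
		return (false);
	}
	return (true);
}

Note the companion change in the patch: the nodetach override moves from
ns_query_done() into query_lookup() (and is cleared again in
query_refresh_rrset()), so the flag is set at the moment the refresh
decision is made rather than patched up afterwards.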

bind.spec

@@ -30,7 +30,7 @@ Summary: The Berkeley Internet Name Domain (BIND) DNS (Domain Name System) serv
Name: bind
License: MPLv2.0
Version: 9.16.23
-Release: 17
+Release: 18
Epoch: 32
Url: https://www.isc.org/downloads/bind/
#
@@ -205,6 +205,9 @@ Patch6122:backport-Fix-a-cleanup-bug-when-isc_task_create-fails-in-dns_catz_new_
Patch6123:backport-Searching-catzs-zones-requires-a-read-lock.patch
Patch6124:backport-Fix-view-s-zones-reverting-bug-during-reconfiguration.patch
+Patch6125:backport-CVE-2023-2911.patch
+Patch6126:backport-CVE-2023-2828.patch
Patch9000:bugfix-limit-numbers-of-test-threads.patch
%{?systemd_ordering}
@@ -1213,6 +1216,12 @@ fi;
%endif
%changelog
+* Mon Jun 26 2023 zhanghao <zhanghao383@huawei.com> - 32:9.16.23-18
+- Type:CVE
+- CVE:CVE-2023-2911 CVE-2023-2828
+- SUG:NA
+- DESC:fix CVE-2023-2911 CVE-2023-2828
+
* Tue Apr 11 2023 zhanghao <zhanghao383@huawei.com> - 32:9.16.23-17
- Type:bugfix
- CVE:NA