Introduce latest version 13.3 for postgresql

This introduces postgresql 13.3 into openEuler; 13.3 is now the latest stable version, matching the upstream postgresql community.
The community's current development branch is 14.X, which is still in beta and therefore not stable, so we introduce 13.3 this time.
bzhaoop 2021-06-17 09:39:43 +08:00
parent 1b9765d8a5
commit 5bb860cbef
45 changed files with 1530 additions and 6525 deletions
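Not part of the commit itself: a minimal libpq sketch for verifying that the packaged server really reports 13.3 after the upgrade. The conninfo string is a placeholder; build with -lpq.

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn *conn = PQconnectdb("dbname=postgres");      /* placeholder conninfo */

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }
    /* PQserverVersion() returns 130003 for a 13.3 server */
    printf("server version: %d\n", PQserverVersion(conn));
    PQfinish(conn);
    return 0;
}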


@@ -1,108 +0,0 @@
From 90adc16ea13750a6b6f704c6cf65dc0f1bdb845c Mon Sep 17 00:00:00 2001
From: Michael Paquier <michael@paquier.xyz>
Date: Mon, 17 Jun 2019 21:48:34 +0900
Subject: [PATCH] Fix buffer overflow when parsing SCRAM verifiers in backend
Any authenticated user can overflow a stack-based buffer by changing the
user's own password to a purpose-crafted value. This often suffices to
execute arbitrary code as the PostgreSQL operating system account.
This fix is contributed by multiple folks, based on an initial analysis
from Tom Lane. This issue has been introduced by 68e61ee, so it was
possible to make use of it at authentication time. It became more
easily to trigger after ccae190 which has made the SCRAM parsing more
strict when changing a password, in the case where the client passes
down a verifier already hashed using SCRAM. Back-patch to v10 where
SCRAM has been introduced.
Reported-by: Alexander Lakhin
Author: Jonathan Katz, Heikki Linnakangas, Michael Paquier
Security: CVE-2019-10164
Backpatch-through: 10
---
src/backend/libpq/auth-scram.c | 35 ++++++++++++++++++++++++++--------
src/test/regress/expected/password.out | 23 ++++++++++++++++++++++
src/test/regress/sql/password.sql | 18 +++++++++++++++++
3 files changed, 68 insertions(+), 8 deletions(-)
diff -Nurp postgresql-10.5/src/backend/libpq/auth-scram.c postgresql-10.5-bak/src/backend/libpq/auth-scram.c
--- postgresql-10.5/src/backend/libpq/auth-scram.c 2018-08-06 16:05:31.000000000 -0400
+++ postgresql-10.5-bak/src/backend/libpq/auth-scram.c 2019-08-01 10:03:08.505000000 -0400
@@ -474,6 +474,12 @@ scram_verify_plain_password(const char *
/*
* Parse and validate format of given SCRAM verifier.
*
+ * On success, the iteration count, salt, stored key, and server key are
+ * extracted from the verifier, and returned to the caller. For 'stored_key'
+ * and 'server_key', the caller must pass pre-allocated buffers of size
+ * SCRAM_KEY_LEN. Salt is returned as a base64-encoded, null-terminated
+ * string. The buffer for the salt is palloc'd by this function.
+ *
* Returns true if the SCRAM verifier has been parsed, and false otherwise.
*/
static bool
@@ -489,6 +495,8 @@ parse_scram_verifier(const char *verifie
char *serverkey_str;
int decoded_len;
char *decoded_salt_buf;
+ char *decoded_stored_buf;
+ char *decoded_server_buf;
/*
* The verifier is of form:
@@ -521,7 +529,8 @@ parse_scram_verifier(const char *verifie
* although we return the encoded version to the caller.
*/
decoded_salt_buf = palloc(pg_b64_dec_len(strlen(salt_str)));
- decoded_len = pg_b64_decode(salt_str, strlen(salt_str), decoded_salt_buf);
+ decoded_len = pg_b64_decode(salt_str, strlen(salt_str),
+ decoded_salt_buf);
if (decoded_len < 0)
goto invalid_verifier;
*salt = pstrdup(salt_str);
@@ -529,28 +538,38 @@ parse_scram_verifier(const char *verifie
/*
* Decode StoredKey and ServerKey.
*/
- if (pg_b64_dec_len(strlen(storedkey_str) != SCRAM_KEY_LEN))
- goto invalid_verifier;
+ decoded_stored_buf = palloc(pg_b64_dec_len(strlen(storedkey_str)));
decoded_len = pg_b64_decode(storedkey_str, strlen(storedkey_str),
- (char *) stored_key);
+ decoded_stored_buf);
if (decoded_len != SCRAM_KEY_LEN)
goto invalid_verifier;
+ memcpy(stored_key, decoded_stored_buf, SCRAM_KEY_LEN);
- if (pg_b64_dec_len(strlen(serverkey_str) != SCRAM_KEY_LEN))
- goto invalid_verifier;
+ decoded_server_buf = palloc(pg_b64_dec_len(strlen(serverkey_str)));
decoded_len = pg_b64_decode(serverkey_str, strlen(serverkey_str),
- (char *) server_key);
+ decoded_server_buf);
if (decoded_len != SCRAM_KEY_LEN)
goto invalid_verifier;
+ memcpy(server_key, decoded_server_buf, SCRAM_KEY_LEN);
return true;
invalid_verifier:
- pfree(v);
*salt = NULL;
return false;
}
+/*
+ * Generate plausible SCRAM verifier parameters for mock authentication.
+ *
+ * In a normal authentication, these are extracted from the verifier
+ * stored in the server. This function generates values that look
+ * realistic, for when there is no stored verifier.
+ *
+ * Like in parse_scram_verifier(), for 'stored_key' and 'server_key', the
+ * caller must pass pre-allocated buffers of size SCRAM_KEY_LEN, and
+ * the buffer for the salt is palloc'd by this function.
+ */
static void
mock_scram_verifier(const char *username, int *iterations, char **salt,
uint8 *stored_key, uint8 *server_key)


@@ -1,77 +0,0 @@
From d72a7e4da1001b29a661a4b1a52cb5c4d708bab0 Mon Sep 17 00:00:00 2001
From: Michael Paquier <michael@paquier.xyz>
Date: Mon, 17 Jun 2019 22:14:09 +0900
Subject: [PATCH] Fix buffer overflow when processing SCRAM final message in
libpq
When a client connects to a rogue server sending specifically-crafted
messages, this can suffice to execute arbitrary code as the operating
system account used by the client.
While on it, fix one error handling when decoding an incorrect salt
included in the first message received from server.
Author: Michael Paquier
Reviewed-by: Jonathan Katz, Heikki Linnakangas
Security: CVE-2019-10164
Backpatch-through: 10
---
src/interfaces/libpq/fe-auth-scram.c | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/src/interfaces/libpq/fe-auth-scram.c b/src/interfaces/libpq/fe-auth-scram.c
index 7fa7f34c80..4cdf9ba93b 100644
--- a/src/interfaces/libpq/fe-auth-scram.c
+++ b/src/interfaces/libpq/fe-auth-scram.c
@@ -462,6 +462,12 @@ read_server_first_message(fe_scram_state *state, char *input,
state->saltlen = pg_b64_decode(encoded_salt,
strlen(encoded_salt),
state->salt);
+ if (state->saltlen < 0)
+ {
+ printfPQExpBuffer(errormessage,
+ libpq_gettext("malformed SCRAM message (invalid salt)\n"));
+ return false;
+ }
iterations_str = read_attr_value(&input, 'i', errormessage);
if (iterations_str == NULL)
@@ -492,6 +498,7 @@ read_server_final_message(fe_scram_state *state, char *input,
PQExpBuffer errormessage)
{
char *encoded_server_signature;
+ char *decoded_server_signature;
int server_signature_len;
state->server_final_message = strdup(input);
@@ -525,15 +532,27 @@ read_server_final_message(fe_scram_state *state, char *input,
printfPQExpBuffer(errormessage,
libpq_gettext("malformed SCRAM message (garbage at end of server-final-message)\n"));
+ server_signature_len = pg_b64_dec_len(strlen(encoded_server_signature));
+ decoded_server_signature = malloc(server_signature_len);
+ if (!decoded_server_signature)
+ {
+ printfPQExpBuffer(errormessage,
+ libpq_gettext("out of memory\n"));
+ return false;
+ }
+
server_signature_len = pg_b64_decode(encoded_server_signature,
strlen(encoded_server_signature),
- state->ServerSignature);
+ decoded_server_signature);
if (server_signature_len != SCRAM_KEY_LEN)
{
+ free(decoded_server_signature);
printfPQExpBuffer(errormessage,
libpq_gettext("malformed SCRAM message (invalid server signature)\n"));
return false;
}
+ memcpy(state->ServerSignature, decoded_server_signature, SCRAM_KEY_LEN);
+ free(decoded_server_signature);
return true;
}
--
2.11.0
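The two patches above share one hardening pattern: never decode an attacker-controlled base64 field directly into a fixed-size key buffer; decode into a scratch buffer sized from the input, check the decoded length, and only then copy into the SCRAM_KEY_LEN-sized destination. Below is a stand-alone C sketch of that pattern, not PostgreSQL code: KEY_LEN and the hex decoder are illustrative stand-ins for SCRAM_KEY_LEN and pg_b64_decode().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_LEN 32                      /* stand-in for SCRAM_KEY_LEN */

static int
hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return -1;
}

/* Decode a lowercase hex string into buf; returns decoded length, or -1. */
static int
hex_decode(const char *src, uint8_t *buf)
{
    size_t len = strlen(src);

    if (len % 2 != 0)
        return -1;
    for (size_t i = 0; i < len; i += 2)
    {
        int hi = hexval(src[i]), lo = hexval(src[i + 1]);

        if (hi < 0 || lo < 0)
            return -1;
        buf[i / 2] = (uint8_t) ((hi << 4) | lo);
    }
    return (int) (len / 2);
}

/*
 * Safe pattern: the scratch buffer can never be too small for the input,
 * and key[] is written only after the length check succeeds, so an
 * over-long field is rejected instead of overflowing a fixed buffer.
 */
static bool
parse_key_field(const char *field, uint8_t *key /* KEY_LEN bytes */)
{
    uint8_t *scratch = malloc(strlen(field) / 2 + 1);
    int      decoded;

    if (scratch == NULL)
        return false;
    decoded = hex_decode(field, scratch);
    if (decoded == KEY_LEN)
        memcpy(key, scratch, KEY_LEN);
    free(scratch);
    return decoded == KEY_LEN;
}

int
main(void)
{
    uint8_t     key[KEY_LEN];
    const char *good =
        "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff";
    const char *too_long =
        "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff0011";

    printf("good field accepted:      %d\n", parse_key_field(good, key));
    printf("over-long field accepted: %d\n", parse_key_field(too_long, key));
    return 0;
}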


@@ -1,224 +0,0 @@
From 2062007cbf7008de383c8f2f4a9ad466ea243acf Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 5 Aug 2019 07:48:41 -0700
Subject: [PATCH] Require the schema qualification in pg_temp.type_name(arg).
Commit aa27977fe21a7dfa4da4376ad66ae37cb8f0d0b5 introduced this
restriction for pg_temp.function_name(arg); do likewise for types
created in temporary schemas. Programs that this breaks should add
"pg_temp." schema qualification or switch to arg::type_name syntax.
Back-patch to 9.4 (all supported versions).
Reviewed by Tom Lane. Reported by Tom Lane.
Security: CVE-2019-10208
---
src/backend/catalog/namespace.c | 15 ++++++++++++++-
src/backend/parser/parse_func.c | 7 ++++++-
src/backend/parser/parse_type.c | 24 +++++++++++++++++++++---
src/backend/utils/adt/ruleutils.c | 8 ++++++++
src/include/catalog/namespace.h | 1 +
src/include/parser/parse_type.h | 3 +++
src/test/regress/expected/temp.out | 15 +++++++++++++++
src/test/regress/sql/temp.sql | 12 ++++++++++++
8 files changed, 79 insertions(+), 5 deletions(-)
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 5a737c8ab3..6b75ce10da 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -739,13 +739,23 @@ RelationIsVisible(Oid relid)
/*
* TypenameGetTypid
+ * Wrapper for binary compatibility.
+ */
+Oid
+TypenameGetTypid(const char *typname)
+{
+ return TypenameGetTypidExtended(typname, true);
+}
+
+/*
+ * TypenameGetTypidExtended
* Try to resolve an unqualified datatype name.
* Returns OID if type found in search path, else InvalidOid.
*
* This is essentially the same as RelnameGetRelid.
*/
Oid
-TypenameGetTypid(const char *typname)
+TypenameGetTypidExtended(const char *typname, bool temp_ok)
{
Oid typid;
ListCell *l;
@@ -756,6 +766,9 @@ TypenameGetTypid(const char *typname)
{
Oid namespaceId = lfirst_oid(l);
+ if (!temp_ok && namespaceId == myTempNamespace)
+ continue; /* do not look in temp namespace */
+
typid = GetSysCacheOid2(TYPENAMENSP,
PointerGetDatum(typname),
ObjectIdGetDatum(namespaceId));
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 25ae4e5949..571d3d1b62 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -1769,7 +1769,12 @@ FuncNameAsType(List *funcname)
Oid result;
Type typtup;
- typtup = LookupTypeName(NULL, makeTypeNameFromNameList(funcname), NULL, false);
+ /*
+ * temp_ok=false protects the <refsect1 id="sql-createfunction-security">
+ * contract for writing SECURITY DEFINER functions safely.
+ */
+ typtup = LookupTypeNameExtended(NULL, makeTypeNameFromNameList(funcname),
+ NULL, false, false);
if (typtup == NULL)
return InvalidOid;
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index d0b3fbeb57..1171381487 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -33,6 +33,18 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
/*
* LookupTypeName
+ * Wrapper for typical case.
+ */
+Type
+LookupTypeName(ParseState *pstate, const TypeName *typeName,
+ int32 *typmod_p, bool missing_ok)
+{
+ return LookupTypeNameExtended(pstate,
+ typeName, typmod_p, true, missing_ok);
+}
+
+/*
+ * LookupTypeNameExtended
* Given a TypeName object, lookup the pg_type syscache entry of the type.
* Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
@@ -51,11 +63,17 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
+ * If temp_ok is false, ignore types in the temporary namespace. Pass false
+ * when the caller will decide, using goodness of fit criteria, whether the
+ * typeName is actually a type or something else. If typeName always denotes
+ * a type (or denotes nothing), pass true.
+ *
* pstate is only used for error location info, and may be NULL.
*/
Type
-LookupTypeName(ParseState *pstate, const TypeName *typeName,
- int32 *typmod_p, bool missing_ok)
+LookupTypeNameExtended(ParseState *pstate,
+ const TypeName *typeName, int32 *typmod_p,
+ bool temp_ok, bool missing_ok)
{
Oid typoid;
HeapTuple tup;
@@ -172,7 +190,7 @@ LookupTypeName(ParseState *pstate, const TypeName *typeName,
else
{
/* Unqualified type name, so search the search path */
- typoid = TypenameGetTypid(typname);
+ typoid = TypenameGetTypidExtended(typname, temp_ok);
}
/* If an array reference, return the array type instead */
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 3d1a701775..01789291bb 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -9273,6 +9273,14 @@ get_coercion_expr(Node *arg, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
+
+ /*
+ * Never emit resulttype(arg) functional notation. A pg_proc entry could
+ * take precedence, and a resulttype in pg_temp would require schema
+ * qualification that format_type_with_typemod() would usually omit. We've
+ * standardized on arg::resulttype, but CAST(arg AS resulttype) notation
+ * would work fine.
+ */
appendStringInfo(buf, "::%s",
format_type_with_typemod(resulttype, resulttypmod));
}
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index f2ee935623..59c5ea5be2 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -66,6 +66,7 @@ extern Oid RelnameGetRelid(const char *relname);
extern bool RelationIsVisible(Oid relid);
extern Oid TypenameGetTypid(const char *typname);
+extern Oid TypenameGetTypidExtended(const char *typname, bool temp_ok);
extern bool TypeIsVisible(Oid typid);
extern FuncCandidateList FuncnameGetCandidates(List *names,
diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h
index 7b843d0b9d..ac1bf1009e 100644
--- a/src/include/parser/parse_type.h
+++ b/src/include/parser/parse_type.h
@@ -21,6 +21,9 @@ typedef HeapTuple Type;
extern Type LookupTypeName(ParseState *pstate, const TypeName *typeName,
int32 *typmod_p, bool missing_ok);
+extern Type LookupTypeNameExtended(ParseState *pstate,
+ const TypeName *typeName, int32 *typmod_p,
+ bool temp_ok, bool missing_ok);
extern Oid LookupTypeNameOid(ParseState *pstate, const TypeName *typeName,
bool missing_ok);
extern Type typenameType(ParseState *pstate, const TypeName *typeName,
diff --git a/src/test/regress/expected/temp.out b/src/test/regress/expected/temp.out
index 7046deb0c8..aad562791e 100644
--- a/src/test/regress/expected/temp.out
+++ b/src/test/regress/expected/temp.out
@@ -199,3 +199,18 @@ select pg_temp.whoami();
(1 row)
drop table public.whereami;
+-- types in temp schema
+set search_path = pg_temp, public;
+create domain pg_temp.nonempty as text check (value <> '');
+-- function-syntax invocation of types matches rules for functions
+select nonempty('');
+ERROR: function nonempty(unknown) does not exist
+LINE 1: select nonempty('');
+ ^
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+select pg_temp.nonempty('');
+ERROR: value for domain nonempty violates check constraint "nonempty_check"
+-- other syntax matches rules for tables
+select ''::nonempty;
+ERROR: value for domain nonempty violates check constraint "nonempty_check"
+reset search_path;
diff --git a/src/test/regress/sql/temp.sql b/src/test/regress/sql/temp.sql
index d69a243fe7..c14c11444c 100644
--- a/src/test/regress/sql/temp.sql
+++ b/src/test/regress/sql/temp.sql
@@ -151,3 +151,15 @@ select pg_temp.whoami();
select pg_temp.whoami();
drop table public.whereami;
+
+-- types in temp schema
+set search_path = pg_temp, public;
+create domain pg_temp.nonempty as text check (value <> '');
+-- function-syntax invocation of types matches rules for functions
+select nonempty('');
+select pg_temp.nonempty('');
+-- other syntax matches rules for tables
+select ''::nonempty;
+
+reset search_path;
+
--
2.11.0


@@ -1,44 +0,0 @@
From 21a134a26100b377191735ad49f996eac03047de Mon Sep 17 00:00:00 2001
From: guiyao <guiyao@huawei.com>
Date: Thu, 17 Dec 2020 09:34:13 -0500
Subject: [PATCH] backport to fix CVE-2018-16850
---
src/backend/utils/adt/ruleutils.c | 10 ++++++----
1 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 4c9a49c..0eea491 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -952,22 +952,24 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
value = fastgetattr(ht_trig, Anum_pg_trigger_tgoldtable,
tgrel->rd_att, &isnull);
if (!isnull)
- tgoldtable = NameStr(*((NameData *) DatumGetPointer(value)));
+ tgoldtable = NameStr(*DatumGetName(value));
else
tgoldtable = NULL;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgnewtable,
tgrel->rd_att, &isnull);
if (!isnull)
- tgnewtable = NameStr(*((NameData *) DatumGetPointer(value)));
+ tgnewtable = NameStr(*DatumGetName(value));
else
tgnewtable = NULL;
if (tgoldtable != NULL || tgnewtable != NULL)
{
appendStringInfoString(&buf, "REFERENCING ");
if (tgoldtable != NULL)
- appendStringInfo(&buf, "OLD TABLE AS %s ", tgoldtable);
+ appendStringInfo(&buf, "OLD TABLE AS %s ",
+ quote_identifier(tgoldtable));
if (tgnewtable != NULL)
- appendStringInfo(&buf, "NEW TABLE AS %s ", tgnewtable);
+ appendStringInfo(&buf, "NEW TABLE AS %s ",
+ quote_identifier(tgnewtable));
}
if (TRIGGER_FOR_ROW(trigrec->tgtype))
--
1.8.3.1
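The fix above routes the REFERENCING transition-table names through quote_identifier() before embedding them in the SQL that pg_get_triggerdef() reconstructs. The stand-alone C sketch below (not PostgreSQL's quote_identifier(), which also decides when quoting can be skipped) shows why: without quoting, a crafted table name splices extra tokens into the generated statement.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Wrap name in double quotes, doubling any embedded double quote. */
static char *
quote_ident_sketch(const char *name)
{
    size_t      len = strlen(name);
    char       *out = malloc(2 * len + 3);      /* worst case: every char doubled */
    char       *p = out;

    if (out == NULL)
        return NULL;
    *p++ = '"';
    for (const char *s = name; *s; s++)
    {
        if (*s == '"')
            *p++ = '"';                         /* escape by doubling */
        *p++ = *s;
    }
    *p++ = '"';
    *p = '\0';
    return out;
}

int
main(void)
{
    const char *evil = "t\" OLD TABLE AS o --";
    char       *quoted = quote_ident_sketch(evil);

    /* Unquoted: the name's contents become SQL syntax. */
    printf("REFERENCING NEW TABLE AS %s ...\n", evil);
    /* Quoted: the whole string stays one identifier. */
    printf("REFERENCING NEW TABLE AS %s ...\n", quoted ? quoted : "(oom)");
    free(quoted);
    return 0;
}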


@@ -1,75 +0,0 @@
From 1aebfbea83c4a3e1a0aba4b0910135dc5a45666c Mon Sep 17 00:00:00 2001
From: Dean Rasheed <dean.a.rasheed@gmail.com>
Date: Mon, 6 May 2019 11:38:43 +0100
Subject: [PATCH] Fix security checks for selectivity estimation functions with
RLS.
In commit e2d4ef8de8, security checks were added to prevent
user-supplied operators from running over data from pg_statistic
unless the user has table or column privileges on the table, or the
operator is leakproof. For a table with RLS, however, checking for
table or column privileges is insufficient, since that does not
guarantee that the user has permission to view all of the column's
data.
Fix this by also checking for securityQuals on the RTE, and insisting
that the operator be leakproof if there are any. Thus the
leakproofness check will only be skipped if there are no securityQuals
and the user has table or column privileges on the table -- i.e., only
if we know that the user has access to all the data in the column.
Back-patch to 9.5 where RLS was added.
Dean Rasheed, reviewed by Jonathan Katz and Stephen Frost.
Security: CVE-2019-10130
---
src/backend/utils/adt/selfuncs.c | 21 +++++++++++++++------
src/test/regress/expected/rowsecurity.out | 21 +++++++++++++++++++++
src/test/regress/sql/rowsecurity.sql | 20 ++++++++++++++++++++
3 files changed, 56 insertions(+), 6 deletions(-)
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index b41991315520..514612857ad6 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4597,9 +4597,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
* For simplicity, we insist on the whole
* table being selectable, rather than trying
* to identify which column(s) the index
- * depends on.
+ * depends on. Also require all rows to be
+ * selectable --- there must be no
+ * securityQuals from security barrier views
+ * or RLS policies.
*/
vardata->acl_ok =
+ rte->securityQuals == NIL &&
(pg_class_aclcheck(rte->relid, GetUserId(),
ACL_SELECT) == ACLCHECK_OK);
}
@@ -4663,12 +4667,17 @@ examine_simple_variable(PlannerInfo *root, Var *var,
if (HeapTupleIsValid(vardata->statsTuple))
{
- /* check if user has permission to read this column */
+ /*
+ * Check if user has permission to read this column. We require
+ * all rows to be accessible, so there must be no securityQuals
+ * from security barrier views or RLS policies.
+ */
vardata->acl_ok =
- (pg_class_aclcheck(rte->relid, GetUserId(),
- ACL_SELECT) == ACLCHECK_OK) ||
- (pg_attribute_aclcheck(rte->relid, var->varattno, GetUserId(),
- ACL_SELECT) == ACLCHECK_OK);
+ rte->securityQuals == NIL &&
+ ((pg_class_aclcheck(rte->relid, GetUserId(),
+ ACL_SELECT) == ACLCHECK_OK) ||
+ (pg_attribute_aclcheck(rte->relid, var->varattno, GetUserId(),
+ ACL_SELECT) == ACLCHECK_OK));
}
else
{
--
2.11.0
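A tiny sketch of the changed access test, with illustrative names rather than the real planner API: once securityQuals from RLS or a security-barrier view are present, table or column SELECT privilege is no longer enough to let non-leakproof operators run over pg_statistic data.

#include <stdbool.h>
#include <stdio.h>

static bool
stats_acl_ok(bool has_security_quals, bool table_select, bool column_select)
{
    /* the user must be able to see *all* rows: any securityQuals veto the shortcut */
    return !has_security_quals && (table_select || column_select);
}

int
main(void)
{
    /* plain table, SELECT on the column: statistics usable */
    printf("%d\n", stats_acl_ok(false, false, true));   /* prints 1 */
    /* RLS policy in effect: not usable even with full SELECT */
    printf("%d\n", stats_acl_ok(true, true, true));     /* prints 0 */
    return 0;
}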


@@ -1,42 +0,0 @@
From b048f558dd7c26a0c630a2cff29d3d8981eaf6b9 Mon Sep 17 00:00:00 2001
From: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date: Mon, 10 Feb 2020 11:47:09 -0300
Subject: [PATCH] Fix priv checks for ALTER <object> DEPENDS ON EXTENSION
Marking an object as dependant on an extension did not have any
privilege check whatsoever; this allowed any user to mark objects as
droppable by anyone able to DROP EXTENSION, which could be used to cause
system-wide havoc. Disallow by checking that the calling user owns the
mentioned object.
(No constraints are placed on the extension.)
Security: CVE-2020-1720
Reported-by: Tom Lane
Discussion: 31605.1566429043@sss.pgh.pa.us
---
src/backend/commands/alter.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index fca85ba2c17f..1cb84182b05f 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -430,6 +430,17 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
get_object_address_rv(stmt->objectType, stmt->relation, (List *) stmt->object,
&rel, AccessExclusiveLock, false);
+ /*
+ * Verify that the user is entitled to run the command.
+ *
+ * We don't check any privileges on the extension, because that's not
+ * needed. The object owner is stipulating, by running this command, that
+ * the extension owner can drop the object whenever they feel like it,
+ * which is not considered a problem.
+ */
+ check_object_ownership(GetUserId(),
+ stmt->objectType, address, stmt->object, rel);
+
/*
* If a relation was involved, it would have been opened and locked. We
* don't need the relation here, but we'll retain the lock until commit.


@@ -1,96 +0,0 @@
From 11da97024abbe76b8c81e3f2375b2a62e9717c67 Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 10 Aug 2020 09:22:54 -0700
Subject: [PATCH] Empty search_path in logical replication apply worker and
walsender.
This is like CVE-2018-1058 commit
582edc369cdbd348d68441fc50fa26a84afd0c1a. Today, a malicious user of a
publisher or subscriber database can invoke arbitrary SQL functions
under an identity running replication, often a superuser. This fix may
cause "does not exist" or "no schema has been selected to create in"
errors in a replication process. After upgrading, consider watching
server logs for these errors. Objects accruing schema qualification in
the wake of the earlier commit are unlikely to need further correction.
Back-patch to v10, which introduced logical replication.
Security: CVE-2020-14349
reason: fix CVE-2020-14349
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=11da97024abbe76b8c81e3f2375b2a62e9717c67
Signed-off-by: Noah Misch <noah@leadboat.com>
---
.../libpqwalreceiver/libpqwalreceiver.c | 17 +++++++++++++++++
src/backend/replication/logical/worker.c | 6 ++++++
src/test/subscription/t/001_rep_changes.pl | 4 ++++
3 files changed, 27 insertions(+)
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 37b481c..564e6d3 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -23,6 +23,7 @@
#include "pqexpbuffer.h"
#include "access/xlog.h"
#include "catalog/pg_type.h"
+#include "fe_utils/connect.h"
#include "funcapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
@@ -210,6 +211,22 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
return NULL;
}
+ if (logical)
+ {
+ PGresult *res;
+
+ res = libpqrcv_PQexec(conn->streamConn,
+ ALWAYS_SECURE_SEARCH_PATH_SQL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not clear search path: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ PQclear(res);
+ }
+
conn->logical = logical;
return conn;
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index bd60094..07b765a 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1548,6 +1548,12 @@ ApplyWorkerMain(Datum main_arg)
BackgroundWorkerInitializeConnectionByOid(MyLogicalRepWorker->dbid,
MyLogicalRepWorker->userid);
+ /*
+ * Set always-secure search path, so malicious users can't redirect user
+ * code (e.g. pg_index.indexprs).
+ */
+ SetConfigOption("search_path", "", PGC_SUSET, PGC_S_OVERRIDE);
+
/* Load the subscription into persistent memory context. */
ApplyContext = AllocSetContextCreate(TopMemoryContext,
"ApplyContext",
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index 0136c79..cda275b 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -16,6 +16,10 @@ $node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;
# Create some preexisting content on publisher
+$node_publisher->safe_psql(
+ 'postgres',
+ "CREATE FUNCTION public.pg_get_replica_identity_index(int)
+ RETURNS regclass LANGUAGE sql AS 'SELECT 1/0'"); # shall not call
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_notrep AS SELECT generate_series(1,10) AS a");
$node_publisher->safe_psql('postgres',
--
2.23.0
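The same search_path hardening can be applied by any libpq client. The sketch below (not part of the patch) clears search_path right after connecting, using the same kind of statement the walreceiver change issues through ALWAYS_SECURE_SEARCH_PATH_SQL; the conninfo string is a placeholder and the program must be built with -lpq.

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");  /* placeholder conninfo */
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Pin search_path to empty so unqualified names cannot be captured. */
    res = PQexec(conn, "SELECT pg_catalog.set_config('search_path', '', false)");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "could not clear search_path: %s", PQerrorMessage(conn));
        PQclear(res);
        PQfinish(conn);
        return 1;
    }
    PQclear(res);

    /* ... application queries, using schema-qualified names from here on ... */
    PQfinish(conn);
    return 0;
}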


@@ -1,54 +0,0 @@
From cec57b1a0fbcd3833086ba686897c5883e0a2afc Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 10 Aug 2020 09:22:54 -0700
Subject: [PATCH] Document clashes between logical replication and untrusted
users.
Back-patch to v10, which introduced logical replication.
Security: CVE-2020-14349
reason: fix CVE-2020-14349
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=cec57b1a0fbcd3833086ba686897c5883e0a2afc
Signed-off-by: Noah Misch <noah@leadboat.com>
---
doc/src/sgml/logical-replication.sgml | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml
index 41770a4..f5086b2 100644
--- a/doc/src/sgml/logical-replication.sgml
+++ b/doc/src/sgml/logical-replication.sgml
@@ -490,11 +490,27 @@
<sect1 id="logical-replication-security">
<title>Security</title>
+ <para>
+ A user able to modify the schema of subscriber-side tables can execute
+ arbitrary code as a superuser. Limit ownership
+ and <literal>TRIGGER</literal> privilege on such tables to roles that
+ superusers trust. Moreover, if untrusted users can create tables, use only
+ publications that list tables explicitly. That is to say, create a
+ subscription <literal>FOR ALL TABLES</literal> only when superusers trust
+ every user permitted to create a non-temp table on the publisher or the
+ subscriber.
+ </para>
+
<para>
The role used for the replication connection must have
- the <literal>REPLICATION</literal> attribute (or be a superuser). Access for the role must be
- configured in <filename>pg_hba.conf</filename> and it must have the
- <literal>LOGIN</literal> attribute.
+ the <literal>REPLICATION</literal> attribute (or be a superuser). If the
+ role lacks <literal>SUPERUSER</literal> and <literal>BYPASSRLS</literal>,
+ publisher row security policies can execute. If the role does not trust
+ all table owners, include <literal>options=-crow_security=off</literal> in
+ the connection string; if a table owner then adds a row security policy,
+ that setting will cause replication to halt rather than execute the policy.
+ Access for the role must be configured in <filename>pg_hba.conf</filename>
+ and it must have the <literal>LOGIN</literal> attribute.
</para>
<para>
--
2.23.0

File diff suppressed because it is too large


@@ -1,492 +0,0 @@
From a062fb822382398c7282810029016400feecf644 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Wed, 21 Oct 2020 16:18:41 -0400
Subject: [PATCH] Fix connection string handling in psql's \connect command.
psql's \connect claims to be able to re-use previous connection
parameters, but in fact it only re-uses the database name, user name,
host name (and possibly hostaddr, depending on version), and port.
This is problematic for assorted use cases. Notably, pg_dump[all]
emits "\connect databasename" commands which we would like to have
re-use all other parameters. If such a script is loaded in a psql run
that initially had "-d connstring" with some non-default parameters,
those other parameters would be lost, potentially causing connection
failure. (Thus, this is the same kind of bug addressed in commits
a45bc8a4f and 8e5793ab6, although the details are much different.)
To fix, redesign do_connect() so that it pulls out all properties
of the old PGconn using PQconninfo(), and then replaces individual
properties in that array. In the case where we don't wish to re-use
anything, get libpq's default settings using PQconndefaults() and
replace entries in that, so that we don't need different code paths
for the two cases.
This does result in an additional behavioral change for cases where
the original connection parameters allowed multiple hosts, say
"psql -h host1,host2", and the \connect request allows re-use of the
host setting. Because the previous coding relied on PQhost(), it
would only permit reconnection to the same host originally selected.
Although one can think of scenarios where that's a good thing, there
are others where it is not. Moreover, that behavior doesn't seem to
meet the principle of least surprise, nor was it documented; nor is
it even clear it was intended, since that coding long pre-dates the
addition of multi-host support to libpq. Hence, this patch is content
to drop it and re-use the host list as given.
Per Peter Eisentraut's comments on bug #16604. Back-patch to all
supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
---
doc/src/sgml/ref/psql-ref.sgml | 44 ++++--
src/bin/psql/command.c | 272 ++++++++++++++++++++++++---------
2 files changed, 230 insertions(+), 86 deletions(-)
diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index c8388513b8..7fda949f3b 100644
--- a/doc/src/sgml/ref/psql-ref.sgml
+++ b/doc/src/sgml/ref/psql-ref.sgml
@@ -875,21 +875,17 @@ testdb=&gt;
<term><literal>\c</literal> or <literal>\connect [ -reuse-previous=<replaceable class="parameter">on|off</replaceable> ] [ <replaceable class="parameter">dbname</replaceable> [ <replaceable class="parameter">username</replaceable> ] [ <replaceable class="parameter">host</replaceable> ] [ <replaceable class="parameter">port</replaceable> ] | <replaceable class="parameter">conninfo</replaceable> ]</literal></term>
<listitem>
<para>
- Establishes a new connection to a <productname>PostgreSQL</>
+ Establishes a new connection to a <productname>PostgreSQL</productname>
server. The connection parameters to use can be specified either
- using a positional syntax, or using <replaceable>conninfo</> connection
- strings as detailed in <xref linkend="libpq-connstring">.
+ using a positional syntax (one or more of database name, user,
+ host, and port), or using a <replaceable>conninfo</replaceable>
+ connection string as detailed in
+ <xref linkend="libpq-connstring">. If no arguments are given, a
+ new connection is made using the same parameters as before.
</para>
<para>
- Where the command omits database name, user, host, or port, the new
- connection can reuse values from the previous connection. By default,
- values from the previous connection are reused except when processing
- a <replaceable>conninfo</> string. Passing a first argument
- of <literal>-reuse-previous=on</>
- or <literal>-reuse-previous=off</literal> overrides that default.
- When the command neither specifies nor reuses a particular parameter,
- the <application>libpq</application> default is used. Specifying any
+ Specifying any
of <replaceable class="parameter">dbname</replaceable>,
<replaceable class="parameter">username</replaceable>,
<replaceable class="parameter">host</replaceable> or
@@ -897,12 +893,31 @@ testdb=&gt;
as <literal>-</literal> is equivalent to omitting that parameter.
</para>
+ <para>
+ The new connection can re-use connection parameters from the previous
+ connection; not only database name, user, host, and port, but other
+ settings such as <replaceable>sslmode</replaceable>. By default,
+ parameters are re-used in the positional syntax, but not when
+ a <replaceable>conninfo</replaceable> string is given. Passing a
+ first argument of <literal>-reuse-previous=on</literal>
+ or <literal>-reuse-previous=off</literal> overrides that default. If
+ parameters are re-used, then any parameter not explicitly specified as
+ a positional parameter or in the <replaceable>conninfo</replaceable>
+ string is taken from the existing connection's parameters. An
+ exception is that if the <replaceable>host</replaceable> setting
+ is changed from its previous value using the positional syntax,
+ any <replaceable>hostaddr</replaceable> setting present in the
+ existing connection's parameters is dropped.
+ When the command neither specifies nor reuses a particular parameter,
+ the <application>libpq</application> default is used.
+ </para>
+
<para>
If the new connection is successfully made, the previous
connection is closed.
- If the connection attempt failed (wrong user name, access
- denied, etc.), the previous connection will only be kept if
- <application>psql</application> is in interactive mode. When
+ If the connection attempt fails (wrong user name, access
+ denied, etc.), the previous connection will be kept if
+ <application>psql</application> is in interactive mode. But when
executing a non-interactive script, processing will
immediately stop with an error. This distinction was chosen as
a user convenience against typos on the one hand, and a safety
@@ -917,6 +932,7 @@ testdb=&gt;
=&gt; \c mydb myuser host.dom 6432
=&gt; \c service=foo
=&gt; \c "host=localhost port=5432 dbname=mydb connect_timeout=10 sslmode=disable"
+=&gt; \c -reuse-previous=on sslmode=require -- changes only sslmode
=&gt; \c postgresql://tom@localhost/mydb?application_name=myapp
</programlisting>
</listitem>
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index a30a5f8ea0..91f7f0adc3 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -2979,12 +2979,15 @@ do_connect(enum trivalue reuse_previous_specification,
char *dbname, char *user, char *host, char *port)
{
PGconn *o_conn = pset.db,
- *n_conn;
+ *n_conn = NULL;
+ PQconninfoOption *cinfo;
+ int nconnopts = 0;
+ bool same_host = false;
char *password = NULL;
- bool keep_password;
+ bool success = true;
+ bool keep_password = true;
bool has_connection_string;
bool reuse_previous;
- PQExpBufferData connstr;
if (!o_conn && (!dbname || !user || !host || !port))
{
@@ -3012,6 +3015,11 @@ do_connect(enum trivalue reuse_previous_specification,
reuse_previous = !has_connection_string;
break;
}
+
+ /* If the old connection does not exist, there is nothing to reuse. */
+ if (!o_conn)
+ reuse_previous = false;
+
/* Silently ignore arguments subsequent to a connection string. */
if (has_connection_string)
{
@@ -3020,41 +3028,126 @@ do_connect(enum trivalue reuse_previous_specification,
port = NULL;
}
- /* grab missing values from the old connection */
- if (!user && reuse_previous)
- user = PQuser(o_conn);
- if (!host && reuse_previous)
- host = PQhost(o_conn);
- if (!port && reuse_previous)
- port = PQport(o_conn);
-
/*
- * Any change in the parameters read above makes us discard the password.
- * We also discard it if we're to use a conninfo rather than the
- * positional syntax.
+ * If we intend to re-use connection parameters, collect them out of the
+ * old connection, then replace individual values as necessary. Otherwise,
+ * obtain a PQconninfoOption array containing libpq's defaults, and modify
+ * that. Note this function assumes that PQconninfo, PQconndefaults, and
+ * PQconninfoParse will all produce arrays containing the same options in
+ * the same order.
*/
- if (has_connection_string)
- keep_password = false;
+ if (reuse_previous)
+ cinfo = PQconninfo(o_conn);
else
- keep_password =
- (user && PQuser(o_conn) && strcmp(user, PQuser(o_conn)) == 0) &&
- (host && PQhost(o_conn) && strcmp(host, PQhost(o_conn)) == 0) &&
- (port && PQport(o_conn) && strcmp(port, PQport(o_conn)) == 0);
+ cinfo = PQconndefaults();
- /*
- * Grab missing dbname from old connection. No password discard if this
- * changes: passwords aren't (usually) database-specific.
- */
- if (!dbname && reuse_previous)
+ if (cinfo)
{
- initPQExpBuffer(&connstr);
- appendPQExpBuffer(&connstr, "dbname=");
- appendConnStrVal(&connstr, PQdb(o_conn));
- dbname = connstr.data;
- /* has_connection_string=true would be a dead store */
+ if (has_connection_string)
+ {
+ /* Parse the connstring and insert values into cinfo */
+ PQconninfoOption *replcinfo;
+ char *errmsg;
+
+ replcinfo = PQconninfoParse(dbname, &errmsg);
+ if (replcinfo)
+ {
+ PQconninfoOption *ci;
+ PQconninfoOption *replci;
+
+ for (ci = cinfo, replci = replcinfo;
+ ci->keyword && replci->keyword;
+ ci++, replci++)
+ {
+ Assert(strcmp(ci->keyword, replci->keyword) == 0);
+ /* Insert value from connstring if one was provided */
+ if (replci->val)
+ {
+ /*
+ * We know that both val strings were allocated by
+ * libpq, so the least messy way to avoid memory leaks
+ * is to swap them.
+ */
+ char *swap = replci->val;
+
+ replci->val = ci->val;
+ ci->val = swap;
+ }
+ }
+ Assert(ci->keyword == NULL && replci->keyword == NULL);
+
+ /* While here, determine how many option slots there are */
+ nconnopts = ci - cinfo;
+
+ PQconninfoFree(replcinfo);
+
+ /* We never re-use a password with a conninfo string. */
+ keep_password = false;
+
+ /* Don't let code below try to inject dbname into params. */
+ dbname = NULL;
+ }
+ else
+ {
+ /* PQconninfoParse failed */
+ if (errmsg)
+ {
+ psql_error("%s", errmsg);
+ PQfreemem(errmsg);
+ }
+ else
+ psql_error("out of memory\n");
+ success = false;
+ }
+ }
+ else
+ {
+ /*
+ * If dbname isn't a connection string, then we'll inject it and
+ * the other parameters into the keyword array below. (We can't
+ * easily insert them into the cinfo array because of memory
+ * management issues: PQconninfoFree would misbehave on Windows.)
+ * However, to avoid dependencies on the order in which parameters
+ * appear in the array, make a preliminary scan to set
+ * keep_password and same_host correctly.
+ *
+ * While any change in user, host, or port causes us to ignore the
+ * old connection's password, we don't force that for dbname,
+ * since passwords aren't database-specific.
+ */
+ PQconninfoOption *ci;
+
+ for (ci = cinfo; ci->keyword; ci++)
+ {
+ if (user && strcmp(ci->keyword, "user") == 0)
+ {
+ if (!(ci->val && strcmp(user, ci->val) == 0))
+ keep_password = false;
+ }
+ else if (host && strcmp(ci->keyword, "host") == 0)
+ {
+ if (ci->val && strcmp(host, ci->val) == 0)
+ same_host = true;
+ else
+ keep_password = false;
+ }
+ else if (port && strcmp(ci->keyword, "port") == 0)
+ {
+ if (!(ci->val && strcmp(port, ci->val) == 0))
+ keep_password = false;
+ }
+ }
+
+ /* While here, determine how many option slots there are */
+ nconnopts = ci - cinfo;
+ }
}
else
- connstr.data = NULL;
+ {
+ /* We failed to create the cinfo structure */
+ psql_error("out of memory\n");
+ success = false;
+ }
/*
* If the user asked to be prompted for a password, ask for one now. If
@@ -3066,7 +3159,7 @@ do_connect(enum trivalue reuse_previous_specification,
* the postmaster's log. But libpq offers no API that would let us obtain
* a password and then continue with the first connection attempt.
*/
- if (pset.getPassword == TRI_YES)
+ if (pset.getPassword == TRI_YES && success)
{
password = prompt_for_password(user);
}
@@ -3079,52 +3172,60 @@ do_connect(enum trivalue reuse_previous_specification,
password = NULL;
}
- while (true)
+ /* Loop till we have a connection or fail, which we might've already */
+ while (success)
{
-#define PARAMS_ARRAY_SIZE 8
- const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
- const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));
- int paramnum = -1;
-
- keywords[++paramnum] = "host";
- values[paramnum] = host;
- keywords[++paramnum] = "port";
- values[paramnum] = port;
- keywords[++paramnum] = "user";
- values[paramnum] = user;
+ const char **keywords = pg_malloc((nconnopts + 1) * sizeof(*keywords));
+ const char **values = pg_malloc((nconnopts + 1) * sizeof(*values));
+ int paramnum = 0;
+ PQconninfoOption *ci;
/*
- * Position in the array matters when the dbname is a connection
- * string, because settings in a connection string override earlier
- * array entries only. Thus, user= in the connection string always
- * takes effect, but client_encoding= often will not.
+ * Copy non-default settings into the PQconnectdbParams parameter
+ * arrays; but override any values specified old-style, as well as the
+ * password and a couple of fields we want to set forcibly.
*
- * If you change this code, also change the initial-connection code in
+ * If you change this code, see also the initial-connection code in
* main(). For no good reason, a connection string password= takes
* precedence in main() but not here.
*/
- keywords[++paramnum] = "dbname";
- values[paramnum] = dbname;
- keywords[++paramnum] = "password";
- values[paramnum] = password;
- keywords[++paramnum] = "fallback_application_name";
- values[paramnum] = pset.progname;
- keywords[++paramnum] = "client_encoding";
- values[paramnum] = (pset.notty || getenv("PGCLIENTENCODING")) ? NULL : "auto";
-
+ for (ci = cinfo; ci->keyword; ci++)
+ {
+ keywords[paramnum] = ci->keyword;
+
+ if (dbname && strcmp(ci->keyword, "dbname") == 0)
+ values[paramnum++] = dbname;
+ else if (user && strcmp(ci->keyword, "user") == 0)
+ values[paramnum++] = user;
+ else if (host && strcmp(ci->keyword, "host") == 0)
+ values[paramnum++] = host;
+ else if (host && !same_host && strcmp(ci->keyword, "hostaddr") == 0)
+ {
+ /* If we're changing the host value, drop any old hostaddr */
+ values[paramnum++] = NULL;
+ }
+ else if (port && strcmp(ci->keyword, "port") == 0)
+ values[paramnum++] = port;
+ else if (strcmp(ci->keyword, "password") == 0)
+ values[paramnum++] = password;
+ else if (strcmp(ci->keyword, "fallback_application_name") == 0)
+ values[paramnum++] = pset.progname;
+ else if (strcmp(ci->keyword, "client_encoding") == 0)
+ values[paramnum++] = (pset.notty || getenv("PGCLIENTENCODING")) ? NULL : "auto";
+ else if (ci->val)
+ values[paramnum++] = ci->val;
+ /* else, don't bother making libpq parse this keyword */
+ }
/* add array terminator */
- keywords[++paramnum] = NULL;
+ keywords[paramnum] = NULL;
values[paramnum] = NULL;
- n_conn = PQconnectdbParams(keywords, values, true);
+ /* Note we do not want libpq to re-expand the dbname parameter */
+ n_conn = PQconnectdbParams(keywords, values, false);
pg_free(keywords);
pg_free(values);
- /* We can immediately discard the password -- no longer needed */
- if (password)
- pg_free(password);
-
if (PQstatus(n_conn) == CONNECTION_OK)
break;
@@ -3134,11 +3235,34 @@ do_connect(enum trivalue reuse_previous_specification,
*/
if (!password && PQconnectionNeedsPassword(n_conn) && pset.getPassword != TRI_NO)
{
+ /*
+ * Prompt for password using the username we actually connected
+ * with --- it might've come out of "dbname" rather than "user".
+ */
+ password = prompt_for_password(PQuser(n_conn));
PQfinish(n_conn);
- password = prompt_for_password(user);
+ n_conn = NULL;
continue;
}
+ /*
+ * We'll report the error below ... unless n_conn is NULL, indicating
+ * that libpq didn't have enough memory to make a PGconn.
+ */
+ if (n_conn == NULL)
+ psql_error("out of memory\n");
+
+ success = false;
+ } /* end retry loop */
+
+ /* Release locally allocated data, whether we succeeded or not */
+ if (password)
+ pg_free(password);
+ if (cinfo)
+ PQconninfoFree(cinfo);
+
+ if (!success)
+ {
/*
* Failed to connect to the database. In interactive mode, keep the
* previous connection to the DB; in scripting mode, close our
@@ -3146,7 +3270,11 @@ do_connect(enum trivalue reuse_previous_specification,
*/
if (pset.cur_cmd_interactive)
{
- psql_error("%s", PQerrorMessage(n_conn));
+ if (n_conn)
+ {
+ psql_error("%s", PQerrorMessage(n_conn));
+ PQfinish(n_conn);
+ }
/* pset.db is left unmodified */
if (o_conn)
@@ -3154,7 +3282,12 @@ do_connect(enum trivalue reuse_previous_specification,
}
else
{
- psql_error("\\connect: %s", PQerrorMessage(n_conn));
+ if (n_conn)
+ {
+ psql_error("\\connect: %s", PQerrorMessage(n_conn));
+ PQfinish(n_conn);
+ }
+
if (o_conn)
{
PQfinish(o_conn);
@@ -3162,13 +3295,8 @@ do_connect(enum trivalue reuse_previous_specification,
}
}
- PQfinish(n_conn);
- if (connstr.data)
- termPQExpBuffer(&connstr);
return false;
}
- if (connstr.data)
- termPQExpBuffer(&connstr);
/*
* Replace the old connection with the new one, and update
--
2.23.0
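The core of the rework above is to harvest every parameter of the live connection with PQconninfo() and override only the ones being changed, so settings such as sslmode survive a \connect. The stand-alone sketch below (not psql's actual do_connect()) shows that approach for the simple case of switching databases; the initial conninfo and the new database name are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libpq-fe.h>

/* Reconnect with the same parameters as 'old', changing only the dbname. */
static PGconn *
reconnect_with_dbname(PGconn *old, const char *new_dbname)
{
    PQconninfoOption *opts = PQconninfo(old);
    const char **keywords;
    const char **values;
    int          n = 0;
    PGconn      *newconn;

    if (opts == NULL)
        return NULL;
    for (PQconninfoOption *o = opts; o->keyword; o++)
        n++;
    keywords = malloc((n + 1) * sizeof(*keywords));
    values = malloc((n + 1) * sizeof(*values));
    if (keywords == NULL || values == NULL)
    {
        free(keywords);
        free(values);
        PQconninfoFree(opts);
        return NULL;
    }

    n = 0;
    for (PQconninfoOption *o = opts; o->keyword; o++)
    {
        if (strcmp(o->keyword, "dbname") == 0)
        {
            keywords[n] = o->keyword;
            values[n++] = new_dbname;           /* the one overridden setting */
        }
        else if (o->val)
        {
            keywords[n] = o->keyword;           /* keep everything else as-is */
            values[n++] = o->val;
        }
    }
    keywords[n] = NULL;
    values[n] = NULL;

    newconn = PQconnectdbParams(keywords, values, 0);

    free(keywords);
    free(values);
    PQconninfoFree(opts);
    return newconn;
}

int
main(void)
{
    PGconn *conn = PQconnectdb("dbname=postgres sslmode=prefer");   /* placeholder */
    PGconn *conn2;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "%s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }
    conn2 = reconnect_with_dbname(conn, "template1");               /* placeholder dbname */
    if (conn2 && PQstatus(conn2) == CONNECTION_OK)
        printf("reconnected to \"%s\" with previous settings\n", PQdb(conn2));
    if (conn2)
        PQfinish(conn2);
    PQfinish(conn);
    return 0;
}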

File diff suppressed because it is too large


@@ -1,782 +0,0 @@
From bb3ab8bfb690721923e987d2dfab683b462c1e88 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Thu, 24 Sep 2020 18:19:39 -0400
Subject: [PATCH] Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
---
src/bin/pg_dump/pg_backup.h | 36 ++--
src/bin/pg_dump/pg_backup_archiver.c | 96 +++--------
src/bin/pg_dump/pg_backup_archiver.h | 3 +-
src/bin/pg_dump/pg_backup_db.c | 249 +++++++--------------------
src/bin/pg_dump/pg_dump.c | 24 +--
src/bin/pg_dump/pg_restore.c | 14 +-
6 files changed, 131 insertions(+), 291 deletions(-)
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 560de01884..72c6a8db6c 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -58,6 +58,20 @@ typedef enum _teSection
SECTION_POST_DATA /* stuff to be processed after data */
} teSection;
+/* Parameters needed by ConnectDatabase; same for dump and restore */
+typedef struct _connParams
+{
+ /* These fields record the actual command line parameters */
+ char *dbname; /* this may be a connstring! */
+ char *pgport;
+ char *pghost;
+ char *username;
+ trivalue promptPassword;
+ /* If not NULL, this overrides the dbname obtained from command line */
+ /* (but *only* the DB name, not anything else in the connstring) */
+ char *override_dbname;
+} ConnParams;
+
typedef struct _restoreOptions
{
int createDB; /* Issue commands to create the database */
@@ -106,12 +120,9 @@ typedef struct _restoreOptions
SimpleStringList tableNames;
int useDB;
- char *dbname; /* subject to expand_dbname */
- char *pgport;
- char *pghost;
- char *username;
+ ConnParams cparams; /* parameters to use if useDB */
+
int noDataForFailedTables;
- trivalue promptPassword;
int exit_on_error;
int compression;
int suppressDumpWarnings; /* Suppress output of WARNING entries
@@ -126,10 +137,8 @@ typedef struct _restoreOptions
typedef struct _dumpOptions
{
- const char *dbname; /* subject to expand_dbname */
- const char *pghost;
- const char *pgport;
- const char *username;
+ ConnParams cparams;
+
bool oids;
int binary_upgrade;
@@ -239,12 +248,9 @@ typedef void (*SetupWorkerPtrType) (Archive *AH);
* Main archiver interface.
*/
-extern void ConnectDatabase(Archive *AH,
- const char *dbname,
- const char *pghost,
- const char *pgport,
- const char *username,
- trivalue prompt_password);
+extern void ConnectDatabase(Archive *AHX,
+ const ConnParams *cparams,
+ bool isReconnect);
extern void DisconnectDatabase(Archive *AHX);
extern PGconn *GetConnection(Archive *AHX);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 8892b17790..4003ab3b13 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -144,6 +144,7 @@ InitDumpOptions(DumpOptions *opts)
memset(opts, 0, sizeof(DumpOptions));
/* set any fields that shouldn't default to zeroes */
opts->include_everything = true;
+ opts->cparams.promptPassword = TRI_DEFAULT;
opts->dumpSections = DUMP_UNSECTIONED;
}
@@ -157,6 +158,11 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
DumpOptions *dopt = NewDumpOptions();
/* this is the inverse of what's at the end of pg_dump.c's main() */
+ dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL;
+ dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL;
+ dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL;
+ dopt->cparams.username = ropt->cparams.username ? pg_strdup(ropt->cparams.username) : NULL;
+ dopt->cparams.promptPassword = ropt->cparams.promptPassword;
dopt->outputClean = ropt->dropSchema;
dopt->dataOnly = ropt->dataOnly;
dopt->schemaOnly = ropt->schemaOnly;
@@ -401,9 +407,7 @@ RestoreArchive(Archive *AHX)
AHX->minRemoteVersion = 0;
AHX->maxRemoteVersion = 9999999;
- ConnectDatabase(AHX, ropt->dbname,
- ropt->pghost, ropt->pgport, ropt->username,
- ropt->promptPassword);
+ ConnectDatabase(AHX, &ropt->cparams, false);
/*
* If we're talking to the DB directly, don't send comments since they
@@ -823,16 +827,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
/* If we created a DB, connect to it... */
if (strcmp(te->desc, "DATABASE") == 0)
{
- PQExpBufferData connstr;
-
- initPQExpBuffer(&connstr);
- appendPQExpBufferStr(&connstr, "dbname=");
- appendConnStrVal(&connstr, te->tag);
- /* Abandon struct, but keep its buffer until process exit. */
-
ahlog(AH, 1, "connecting to new database \"%s\"\n", te->tag);
_reconnectToDB(AH, te->tag);
- ropt->dbname = connstr.data;
}
}
@@ -966,7 +962,7 @@ NewRestoreOptions(void)
/* set any fields that shouldn't default to zeroes */
opts->format = archUnknown;
- opts->promptPassword = TRI_DEFAULT;
+ opts->cparams.promptPassword = TRI_DEFAULT;
opts->dumpSections = DUMP_UNSECTIONED;
return opts;
@@ -2388,8 +2384,6 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
else
AH->format = fmt;
- AH->promptPassword = TRI_DEFAULT;
-
switch (AH->format)
{
case archCustom:
@@ -3157,27 +3151,20 @@ _doSetWithOids(ArchiveHandle *AH, const bool withOids)
* If we're currently restoring right into a database, this will
* actually establish a connection. Otherwise it puts a \connect into
* the script output.
- *
- * NULL dbname implies reconnecting to the current DB (pretty useless).
*/
static void
_reconnectToDB(ArchiveHandle *AH, const char *dbname)
{
if (RestoringToDB(AH))
- ReconnectToServer(AH, dbname, NULL);
+ ReconnectToServer(AH, dbname);
else
{
- if (dbname)
- {
- PQExpBufferData connectbuf;
+ PQExpBufferData connectbuf;
- initPQExpBuffer(&connectbuf);
- appendPsqlMetaConnect(&connectbuf, dbname);
- ahprintf(AH, "%s\n", connectbuf.data);
- termPQExpBuffer(&connectbuf);
- }
- else
- ahprintf(AH, "%s\n", "\\connect -\n");
+ initPQExpBuffer(&connectbuf);
+ appendPsqlMetaConnect(&connectbuf, dbname);
+ ahprintf(AH, "%s\n", connectbuf.data);
+ termPQExpBuffer(&connectbuf);
}
/*
@@ -4107,9 +4094,7 @@ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
/*
* Now reconnect the single parent connection.
*/
- ConnectDatabase((Archive *) AH, ropt->dbname,
- ropt->pghost, ropt->pgport, ropt->username,
- ropt->promptPassword);
+ ConnectDatabase((Archive *) AH, &ropt->cparams, true);
/* re-establish fixed state */
_doSetFixedOutputState(AH);
@@ -4690,54 +4675,15 @@ CloneArchive(ArchiveHandle *AH)
clone->public.n_errors = 0;
/*
- * Connect our new clone object to the database: In parallel restore the
- * parent is already disconnected, because we can connect the worker
- * processes independently to the database (no snapshot sync required). In
- * parallel backup we clone the parent's existing connection.
+ * Connect our new clone object to the database, using the same connection
+ * parameters used for the original connection.
*/
- if (AH->mode == archModeRead)
- {
- RestoreOptions *ropt = AH->public.ropt;
-
- Assert(AH->connection == NULL);
-
- /* this also sets clone->connection */
- ConnectDatabase((Archive *) clone, ropt->dbname,
- ropt->pghost, ropt->pgport, ropt->username,
- ropt->promptPassword);
+ ConnectDatabase((Archive *) clone, &clone->public.ropt->cparams, true);
- /* re-establish fixed state */
+ /* re-establish fixed state */
+ if (AH->mode == archModeRead)
_doSetFixedOutputState(clone);
- }
- else
- {
- PQExpBufferData connstr;
- char *pghost;
- char *pgport;
- char *username;
-
- Assert(AH->connection != NULL);
-
- /*
- * Even though we are technically accessing the parent's database
- * object here, these functions are fine to be called like that
- * because all just return a pointer and do not actually send/receive
- * any data to/from the database.
- */
- initPQExpBuffer(&connstr);
- appendPQExpBuffer(&connstr, "dbname=");
- appendConnStrVal(&connstr, PQdb(AH->connection));
- pghost = PQhost(AH->connection);
- pgport = PQport(AH->connection);
- username = PQuser(AH->connection);
-
- /* this also sets clone->connection */
- ConnectDatabase((Archive *) clone, connstr.data,
- pghost, pgport, username, TRI_NO);
-
- termPQExpBuffer(&connstr);
- /* setupDumpWorker will fix up connection state */
- }
+ /* in write case, setupDumpWorker will fix up connection state */
/* Let the format-specific code have a chance too */
(clone->ClonePtr) (clone);
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 3b69868359..50829c4b2e 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -304,7 +304,6 @@ struct _archiveHandle
/* Stuff for direct DB connection */
char *archdbname; /* DB name *read* from archive */
- trivalue promptPassword;
char *savedPassword; /* password for ropt->username, if known */
char *use_role;
PGconn *connection;
@@ -450,7 +449,7 @@ extern void InitArchiveFmt_Tar(ArchiveHandle *AH);
extern bool isValidTarHeader(char *header);
-extern int ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *newUser);
+extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname);
extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid);
void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 43f5941f93..4eaea4af4f 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -30,7 +30,6 @@
static const char *modulename = gettext_noop("archiver (db)");
static void _check_database_version(ArchiveHandle *AH);
-static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser);
static void notice_processor(void *arg, const char *message);
static void
@@ -76,186 +75,51 @@ _check_database_version(ArchiveHandle *AH)
/*
* Reconnect to the server. If dbname is not NULL, use that database,
- * else the one associated with the archive handle. If username is
- * not NULL, use that user name, else the one from the handle. If
- * both the database and the user match the existing connection already,
- * nothing will be done.
- *
- * Returns 1 in any case.
+ * else the one associated with the archive handle.
*/
-int
-ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
-{
- PGconn *newConn;
- const char *newdbname;
- const char *newusername;
-
- if (!dbname)
- newdbname = PQdb(AH->connection);
- else
- newdbname = dbname;
-
- if (!username)
- newusername = PQuser(AH->connection);
- else
- newusername = username;
-
- /* Let's see if the request is already satisfied */
- if (strcmp(newdbname, PQdb(AH->connection)) == 0 &&
- strcmp(newusername, PQuser(AH->connection)) == 0)
- return 1;
-
- newConn = _connectDB(AH, newdbname, newusername);
-
- /* Update ArchiveHandle's connCancel before closing old connection */
- set_archive_cancel_info(AH, newConn);
-
- PQfinish(AH->connection);
- AH->connection = newConn;
-
- /* Start strict; later phases may override this. */
- PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH,
- ALWAYS_SECURE_SEARCH_PATH_SQL));
-
- return 1;
-}
-
-/*
- * Connect to the db again.
- *
- * Note: it's not really all that sensible to use a single-entry password
- * cache if the username keeps changing. In current usage, however, the
- * username never does change, so one savedPassword is sufficient. We do
- * update the cache on the off chance that the password has changed since the
- * start of the run.
- */
-static PGconn *
-_connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
+void
+ReconnectToServer(ArchiveHandle *AH, const char *dbname)
{
- PQExpBufferData connstr;
- PGconn *newConn;
- const char *newdb;
- const char *newuser;
- char *password;
- char passbuf[100];
- bool new_pass;
-
- if (!reqdb)
- newdb = PQdb(AH->connection);
- else
- newdb = reqdb;
-
- if (!requser || strlen(requser) == 0)
- newuser = PQuser(AH->connection);
- else
- newuser = requser;
-
- ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n",
- newdb, newuser);
-
- password = AH->savedPassword;
-
- if (AH->promptPassword == TRI_YES && password == NULL)
- {
- simple_prompt("Password: ", passbuf, sizeof(passbuf), false);
- password = passbuf;
- }
-
- initPQExpBuffer(&connstr);
- appendPQExpBuffer(&connstr, "dbname=");
- appendConnStrVal(&connstr, newdb);
-
- do
- {
- const char *keywords[7];
- const char *values[7];
-
- keywords[0] = "host";
- values[0] = PQhost(AH->connection);
- keywords[1] = "port";
- values[1] = PQport(AH->connection);
- keywords[2] = "user";
- values[2] = newuser;
- keywords[3] = "password";
- values[3] = password;
- keywords[4] = "dbname";
- values[4] = connstr.data;
- keywords[5] = "fallback_application_name";
- values[5] = progname;
- keywords[6] = NULL;
- values[6] = NULL;
-
- new_pass = false;
- newConn = PQconnectdbParams(keywords, values, true);
-
- if (!newConn)
- exit_horribly(modulename, "failed to reconnect to database\n");
-
- if (PQstatus(newConn) == CONNECTION_BAD)
- {
- if (!PQconnectionNeedsPassword(newConn))
- exit_horribly(modulename, "could not reconnect to database: %s",
- PQerrorMessage(newConn));
- PQfinish(newConn);
-
- if (password)
- fprintf(stderr, "Password incorrect\n");
-
- fprintf(stderr, "Connecting to %s as %s\n",
- newdb, newuser);
-
- if (AH->promptPassword != TRI_NO)
- {
- simple_prompt("Password: ", passbuf, sizeof(passbuf), false);
- password = passbuf;
- }
- else
- exit_horribly(modulename, "connection needs password\n");
-
- new_pass = true;
- }
- } while (new_pass);
+ PGconn *oldConn = AH->connection;
+ RestoreOptions *ropt = AH->public.ropt;
/*
- * We want to remember connection's actual password, whether or not we got
- * it by prompting. So we don't just store the password variable.
+ * Save the dbname, if given, in override_dbname so that it will also
+ * affect any later reconnection attempt.
*/
- if (PQconnectionUsedPassword(newConn))
- {
- if (AH->savedPassword)
- free(AH->savedPassword);
- AH->savedPassword = pg_strdup(PQpass(newConn));
- }
+ if (dbname)
+ ropt->cparams.override_dbname = pg_strdup(dbname);
- termPQExpBuffer(&connstr);
-
- /* check for version mismatch */
- _check_database_version(AH);
+ /*
+ * Note: we want to establish the new connection, and in particular update
+ * ArchiveHandle's connCancel, before closing old connection. Otherwise
+ * an ill-timed SIGINT could try to access a dead connection.
+ */
+ AH->connection = NULL; /* dodge error check in ConnectDatabase */
- PQsetNoticeProcessor(newConn, notice_processor, NULL);
+ ConnectDatabase((Archive *) AH, &ropt->cparams, true);
- return newConn;
+ PQfinish(oldConn);
}
-
/*
- * Make a database connection with the given parameters. The
- * connection handle is returned, the parameters are stored in AHX.
- * An interactive password prompt is automatically issued if required.
+ * Make, or remake, a database connection with the given parameters.
+ *
+ * The resulting connection handle is stored in AHX->connection.
*
+ * An interactive password prompt is automatically issued if required.
+ * We store the results of that in AHX->savedPassword.
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
* username never does change, so one savedPassword is sufficient.
*/
void
ConnectDatabase(Archive *AHX,
- const char *dbname,
- const char *pghost,
- const char *pgport,
- const char *username,
- trivalue prompt_password)
+ const ConnParams *cparams,
+ bool isReconnect)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
+ trivalue prompt_password;
char *password;
char passbuf[100];
bool new_pass;
@@ -263,6 +127,9 @@ ConnectDatabase(Archive *AHX,
if (AH->connection)
exit_horribly(modulename, "already connected to a database\n");
+ /* Never prompt for a password during a reconnection */
+ prompt_password = isReconnect ? TRI_NO : cparams->promptPassword;
+
password = AH->savedPassword;
if (prompt_password == TRI_YES && password == NULL)
@@ -270,7 +137,6 @@ ConnectDatabase(Archive *AHX,
simple_prompt("Password: ", passbuf, sizeof(passbuf), false);
password = passbuf;
}
- AH->promptPassword = prompt_password;
/*
* Start the connection. Loop until we have a password if requested by
@@ -278,23 +144,35 @@ ConnectDatabase(Archive *AHX,
*/
do
{
- const char *keywords[7];
- const char *values[7];
-
- keywords[0] = "host";
- values[0] = pghost;
- keywords[1] = "port";
- values[1] = pgport;
- keywords[2] = "user";
- values[2] = username;
- keywords[3] = "password";
- values[3] = password;
- keywords[4] = "dbname";
- values[4] = dbname;
- keywords[5] = "fallback_application_name";
- values[5] = progname;
- keywords[6] = NULL;
- values[6] = NULL;
+ const char *keywords[8];
+ const char *values[8];
+ int i = 0;
+
+ /*
+ * If dbname is a connstring, its entries can override the other
+ * values obtained from cparams; but in turn, override_dbname can
+ * override the dbname component of it.
+ */
+ keywords[i] = "host";
+ values[i++] = cparams->pghost;
+ keywords[i] = "port";
+ values[i++] = cparams->pgport;
+ keywords[i] = "user";
+ values[i++] = cparams->username;
+ keywords[i] = "password";
+ values[i++] = password;
+ keywords[i] = "dbname";
+ values[i++] = cparams->dbname;
+ if (cparams->override_dbname)
+ {
+ keywords[i] = "dbname";
+ values[i++] = cparams->override_dbname;
+ }
+ keywords[i] = "fallback_application_name";
+ values[i++] = progname;
+ keywords[i] = NULL;
+ values[i++] = NULL;
+ Assert(i <= lengthof(keywords));
new_pass = false;
AH->connection = PQconnectdbParams(keywords, values, true);
@@ -316,9 +194,16 @@ ConnectDatabase(Archive *AHX,
/* check to see that the backend connection was successfully made */
if (PQstatus(AH->connection) == CONNECTION_BAD)
- exit_horribly(modulename, "connection to database \"%s\" failed: %s",
- PQdb(AH->connection) ? PQdb(AH->connection) : "",
- PQerrorMessage(AH->connection));
+ {
+ if (isReconnect)
+ exit_horribly(modulename, "reconnection to database \"%s\" failed: %s",
+ PQdb(AH->connection) ? PQdb(AH->connection) : "",
+ PQerrorMessage(AH->connection));
+ else
+ exit_horribly(modulename, "connection to database \"%s\" failed: %s",
+ PQdb(AH->connection) ? PQdb(AH->connection) : "",
+ PQerrorMessage(AH->connection));
+ }
/* Start strict; later phases may override this. */
PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH,
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 8080155a95..abcee89d10 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -302,7 +302,6 @@ main(int argc, char **argv)
const char *dumpsnapshot = NULL;
char *use_role = NULL;
int numWorkers = 1;
- trivalue prompt_password = TRI_DEFAULT;
int compressLevel = -1;
int plainText = 0;
ArchiveFormat archiveFormat = archUnknown;
@@ -431,7 +430,7 @@ main(int argc, char **argv)
break;
case 'd': /* database name */
- dopt.dbname = pg_strdup(optarg);
+ dopt.cparams.dbname = pg_strdup(optarg);
break;
case 'E': /* Dump encoding */
@@ -447,7 +446,7 @@ main(int argc, char **argv)
break;
case 'h': /* server host */
- dopt.pghost = pg_strdup(optarg);
+ dopt.cparams.pghost = pg_strdup(optarg);
break;
case 'j': /* number of dump jobs */
@@ -472,7 +471,7 @@ main(int argc, char **argv)
break;
case 'p': /* server port */
- dopt.pgport = pg_strdup(optarg);
+ dopt.cparams.pgport = pg_strdup(optarg);
break;
case 'R':
@@ -497,7 +496,7 @@ main(int argc, char **argv)
break;
case 'U':
- dopt.username = pg_strdup(optarg);
+ dopt.cparams.username = pg_strdup(optarg);
break;
case 'v': /* verbose */
@@ -505,11 +504,11 @@ main(int argc, char **argv)
break;
case 'w':
- prompt_password = TRI_NO;
+ dopt.cparams.promptPassword = TRI_NO;
break;
case 'W':
- prompt_password = TRI_YES;
+ dopt.cparams.promptPassword = TRI_YES;
break;
case 'x': /* skip ACL dump */
@@ -563,8 +562,8 @@ main(int argc, char **argv)
* Non-option argument specifies database name as long as it wasn't
* already specified with -d / --dbname
*/
- if (optind < argc && dopt.dbname == NULL)
- dopt.dbname = argv[optind++];
+ if (optind < argc && dopt.cparams.dbname == NULL)
+ dopt.cparams.dbname = argv[optind++];
/* Complain if any arguments remain */
if (optind < argc)
@@ -677,7 +676,7 @@ main(int argc, char **argv)
* Open the database using the Archiver, so it knows about it. Errors mean
* death.
*/
- ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
+ ConnectDatabase(fout, &dopt.cparams, false);
setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
/*
@@ -860,6 +859,11 @@ main(int argc, char **argv)
ropt->filename = filename;
/* if you change this list, see dumpOptionsFromRestoreOptions */
+ ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
+ ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
+ ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
+ ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
+ ropt->cparams.promptPassword = dopt.cparams.promptPassword;
ropt->dropSchema = dopt.outputClean;
ropt->dataOnly = dopt.dataOnly;
ropt->schemaOnly = dopt.schemaOnly;
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 860a211a3c..ce03ded8ec 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -163,7 +163,7 @@ main(int argc, char **argv)
opts->createDB = 1;
break;
case 'd':
- opts->dbname = pg_strdup(optarg);
+ opts->cparams.dbname = pg_strdup(optarg);
break;
case 'e':
opts->exit_on_error = true;
@@ -177,7 +177,7 @@ main(int argc, char **argv)
break;
case 'h':
if (strlen(optarg) != 0)
- opts->pghost = pg_strdup(optarg);
+ opts->cparams.pghost = pg_strdup(optarg);
break;
case 'j': /* number of restore jobs */
@@ -206,7 +206,7 @@ main(int argc, char **argv)
case 'p':
if (strlen(optarg) != 0)
- opts->pgport = pg_strdup(optarg);
+ opts->cparams.pgport = pg_strdup(optarg);
break;
case 'R':
/* no-op, still accepted for backwards compatibility */
@@ -240,7 +240,7 @@ main(int argc, char **argv)
break;
case 'U':
- opts->username = pg_strdup(optarg);
+ opts->cparams.username = pg_strdup(optarg);
break;
case 'v': /* verbose */
@@ -248,11 +248,11 @@ main(int argc, char **argv)
break;
case 'w':
- opts->promptPassword = TRI_NO;
+ opts->cparams.promptPassword = TRI_NO;
break;
case 'W':
- opts->promptPassword = TRI_YES;
+ opts->cparams.promptPassword = TRI_YES;
break;
case 'x': /* skip ACL dump */
@@ -302,7 +302,7 @@ main(int argc, char **argv)
}
/* Should get at most one of -d and -f, else user is confused */
- if (opts->dbname)
+ if (opts->cparams.dbname)
{
if (opts->filename)
{
--
2.23.0
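The patch above funnels every pg_dump/pg_restore connection option into a single ConnParams struct (cparams), which ConnectDatabase() and the simplified ReconnectToServer() now consume. A minimal command-line sketch of what ends up in those fields; host, port, user and database names here are placeholders, not values taken from the patch:
# pg_dump: -h/-p/-U/-w fill dopt.cparams.{pghost,pgport,username,promptPassword},
# and main() now passes the whole struct: ConnectDatabase(fout, &dopt.cparams, false).
pg_dump -h db1.example.com -p 5432 -U dumper -w -Fc -f demo.dump demodb
# pg_restore: the same options fill opts->cparams; a restore with --create
# reconnects to the new database via ReconnectToServer(), which reuses the
# saved cparams and, per the patch, never prompts for a password again.
pg_restore -h db1.example.com -p 5432 -U dumper -w -d postgres --create demo.dump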

View File

@ -1,241 +0,0 @@
From f97ecea1ed7b09c6f1398540a1d72a57eee70c9f Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 9 Nov 2020 07:32:09 -0800
Subject: [PATCH] In security-restricted operations, block enqueue of at-commit
user code.
Specifically, this blocks DECLARE ... WITH HOLD and firing of deferred
triggers within index expressions and materialized view queries. An
attacker having permission to create non-temp objects in at least one
schema could execute arbitrary SQL functions under the identity of the
bootstrap superuser. One can work around the vulnerability by disabling
autovacuum and not manually running ANALYZE, CLUSTER, REINDEX, CREATE
INDEX, VACUUM FULL, or REFRESH MATERIALIZED VIEW. (Don't restore from
pg_dump, since it runs some of those commands.) Plain VACUUM (without
FULL) is safe, and all commands are fine when a trusted user owns the
target object. Performance may degrade quickly under this workaround,
however. Back-patch to 9.5 (all supported versions).
Reviewed by Robert Haas. Reported by Etienne Stalmans.
Security: CVE-2020-25695
---
contrib/postgres_fdw/connection.c | 4 +++
src/backend/access/transam/xact.c | 13 ++++----
src/backend/commands/portalcmds.c | 5 +++
src/backend/commands/trigger.c | 12 +++++++
src/test/regress/expected/privileges.out | 42 ++++++++++++++++++++++++
src/test/regress/sql/privileges.sql | 34 +++++++++++++++++++
6 files changed, 104 insertions(+), 6 deletions(-)
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 885bd075798c..5dcff3d07624 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -645,6 +645,10 @@ pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
/*
* pgfdw_xact_callback --- cleanup at main-transaction end.
+ *
+ * This runs just late enough that it must not enter user-defined code
+ * locally. (Entering such code on the remote side is fine. Its remote
+ * COMMIT TRANSACTION may run deferred triggers.)
*/
static void
pgfdw_xact_callback(XactEvent event, void *arg)
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 37f31a2c31ea..00cec50e8402 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1994,9 +1994,10 @@ CommitTransaction(void)
/*
* Do pre-commit processing that involves calling user-defined code, such
- * as triggers. Since closing cursors could queue trigger actions,
- * triggers could open cursors, etc, we have to keep looping until there's
- * nothing left to do.
+ * as triggers. SECURITY_RESTRICTED_OPERATION contexts must not queue an
+ * action that would run here, because that would bypass the sandbox.
+ * Since closing cursors could queue trigger actions, triggers could open
+ * cursors, etc, we have to keep looping until there's nothing left to do.
*/
for (;;)
{
@@ -2014,9 +2015,6 @@ CommitTransaction(void)
break;
}
- CallXactCallbacks(is_parallel_worker ? XACT_EVENT_PARALLEL_PRE_COMMIT
- : XACT_EVENT_PRE_COMMIT);
-
/*
* The remaining actions cannot call any user-defined code, so it's safe
* to start shutting down within-transaction services. But note that most
@@ -2024,6 +2022,9 @@ CommitTransaction(void)
* the transaction-abort path.
*/
+ CallXactCallbacks(is_parallel_worker ? XACT_EVENT_PARALLEL_PRE_COMMIT
+ : XACT_EVENT_PRE_COMMIT);
+
/* If we might have parallel workers, clean them up now. */
if (IsInParallelMode())
AtEOXact_Parallel(true);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 46369cf3dbee..3d01a782da06 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -27,6 +27,7 @@
#include "commands/portalcmds.h"
#include "executor/executor.h"
#include "executor/tstoreReceiver.h"
+#include "miscadmin.h"
#include "rewrite/rewriteHandler.h"
#include "tcop/pquery.h"
#include "tcop/tcopprot.h"
@@ -64,6 +65,10 @@ PerformCursorOpen(DeclareCursorStmt *cstmt, ParamListInfo params,
*/
if (!(cstmt->options & CURSOR_OPT_HOLD))
RequireTransactionChain(isTopLevel, "DECLARE CURSOR");
+ else if (InSecurityRestrictedOperation())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("cannot create a cursor WITH HOLD within security-restricted operation")));
/*
* Parse analysis was done already, but we still have to run the rule
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index f83840625348..9c04eee48422 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -4144,6 +4144,7 @@ afterTriggerMarkEvents(AfterTriggerEventList *events,
bool immediate_only)
{
bool found = false;
+ bool deferred_found = false;
AfterTriggerEvent event;
AfterTriggerEventChunk *chunk;
@@ -4179,6 +4180,7 @@ afterTriggerMarkEvents(AfterTriggerEventList *events,
*/
if (defer_it && move_list != NULL)
{
+ deferred_found = true;
/* add it to move_list */
afterTriggerAddEvent(move_list, event, evtshared);
/* mark original copy "done" so we don't do it again */
@@ -4186,6 +4188,16 @@ afterTriggerMarkEvents(AfterTriggerEventList *events,
}
}
+ /*
+ * We could allow deferred triggers if, before the end of the
+ * security-restricted operation, we were to verify that a SET CONSTRAINTS
+ * ... IMMEDIATE has fired all such triggers. For now, don't bother.
+ */
+ if (deferred_found && InSecurityRestrictedOperation())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("cannot fire deferred trigger within security-restricted operation")));
+
return found;
}
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index dacc98450514..26ee16a0c370 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -1253,6 +1253,48 @@ SELECT has_table_privilege('regress_user1', 'atest4', 'SELECT WITH GRANT OPTION'
t
(1 row)
+-- security-restricted operations
+\c -
+CREATE ROLE regress_sro_user;
+SET SESSION AUTHORIZATION regress_sro_user;
+CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS
+ 'GRANT regress_group2 TO regress_sro_user';
+CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
+ 'DECLARE c CURSOR WITH HOLD FOR SELECT unwanted_grant(); SELECT true';
+-- REFRESH of this MV will queue a GRANT at end of transaction
+CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA;
+REFRESH MATERIALIZED VIEW sro_mv;
+ERROR: cannot create a cursor WITH HOLD within security-restricted operation
+CONTEXT: SQL function "mv_action" statement 1
+\c -
+REFRESH MATERIALIZED VIEW sro_mv;
+ERROR: cannot create a cursor WITH HOLD within security-restricted operation
+CONTEXT: SQL function "mv_action" statement 1
+SET SESSION AUTHORIZATION regress_sro_user;
+-- INSERT to this table will queue a GRANT at end of transaction
+CREATE TABLE sro_trojan_table ();
+CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS
+ 'BEGIN PERFORM unwanted_grant(); RETURN NULL; END';
+CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table
+ INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan();
+-- Now, REFRESH will issue such an INSERT, queueing the GRANT
+CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
+ 'INSERT INTO sro_trojan_table DEFAULT VALUES; SELECT true';
+REFRESH MATERIALIZED VIEW sro_mv;
+ERROR: cannot fire deferred trigger within security-restricted operation
+CONTEXT: SQL function "mv_action" statement 1
+\c -
+REFRESH MATERIALIZED VIEW sro_mv;
+ERROR: cannot fire deferred trigger within security-restricted operation
+CONTEXT: SQL function "mv_action" statement 1
+BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
+ERROR: must have admin option on role "regress_group2"
+CONTEXT: SQL function "unwanted_grant" statement 1
+SQL statement "SELECT unwanted_grant()"
+PL/pgSQL function sro_trojan() line 1 at PERFORM
+SQL function "mv_action" statement 1
+DROP OWNED BY regress_sro_user;
+DROP ROLE regress_sro_user;
-- Admin options
SET SESSION AUTHORIZATION regress_user4;
CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 4263315a2d87..f979cccea03f 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -761,6 +761,40 @@ SELECT has_table_privilege('regress_user3', 'atest4', 'SELECT'); -- false
SELECT has_table_privilege('regress_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true
+-- security-restricted operations
+\c -
+CREATE ROLE regress_sro_user;
+
+SET SESSION AUTHORIZATION regress_sro_user;
+CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS
+ 'GRANT regress_group2 TO regress_sro_user';
+CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
+ 'DECLARE c CURSOR WITH HOLD FOR SELECT unwanted_grant(); SELECT true';
+-- REFRESH of this MV will queue a GRANT at end of transaction
+CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA;
+REFRESH MATERIALIZED VIEW sro_mv;
+\c -
+REFRESH MATERIALIZED VIEW sro_mv;
+
+SET SESSION AUTHORIZATION regress_sro_user;
+-- INSERT to this table will queue a GRANT at end of transaction
+CREATE TABLE sro_trojan_table ();
+CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS
+ 'BEGIN PERFORM unwanted_grant(); RETURN NULL; END';
+CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table
+ INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan();
+-- Now, REFRESH will issue such an INSERT, queueing the GRANT
+CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
+ 'INSERT INTO sro_trojan_table DEFAULT VALUES; SELECT true';
+REFRESH MATERIALIZED VIEW sro_mv;
+\c -
+REFRESH MATERIALIZED VIEW sro_mv;
+BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
+
+DROP OWNED BY regress_sro_user;
+DROP ROLE regress_sro_user;
+
+
-- Admin options
SET SESSION AUTHORIZATION regress_user4;
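For reference, the new behaviour is easy to observe from a shell by reusing the sro_mv / mv_action() setup from the regression test above; the database name and plain psql invocation are placeholders:
# With the patch applied, refreshing a materialized view whose query opens a
# WITH HOLD cursor (or queues a deferred trigger) aborts instead of letting the
# queued action escape the security-restricted operation at commit time.
psql -d demodb -c 'REFRESH MATERIALIZED VIEW sro_mv;'
# expected: ERROR:  cannot create a cursor WITH HOLD within security-restricted operation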

View File

@ -1,121 +0,0 @@
From a498db87be103f93856dd515a574b2d67d3c1237 Mon Sep 17 00:00:00 2001
From: Noah Misch <noah@leadboat.com>
Date: Mon, 9 Nov 2020 07:32:09 -0800
Subject: [PATCH] Ignore attempts to \gset into specially treated variables.
If an interactive psql session used \gset when querying a compromised
server, the attacker could execute arbitrary code as the operating
system account running psql. Using a prefix not found among specially
treated variables, e.g. every lowercase string, precluded the attack.
Fix by issuing a warning and setting no variable for the column in
question. Users wanting the old behavior can use a prefix and then a
meta-command like "\set HISTSIZE :prefix_HISTSIZE". Back-patch to 9.5
(all supported versions).
Reviewed by Robert Haas. Reported by Nick Cleaton.
Security: CVE-2020-25696
---
src/bin/psql/common.c | 7 +++++++
src/bin/psql/variables.c | 26 ++++++++++++++++++++++++++
src/bin/psql/variables.h | 1 +
src/test/regress/expected/psql.out | 4 ++++
src/test/regress/sql/psql.sql | 3 +++
5 files changed, 41 insertions(+)
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index d04a7943d6a9..4e342c180a6c 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -894,6 +894,13 @@ StoreQueryTuple(const PGresult *result)
/* concatenate prefix and column name */
varname = psprintf("%s%s", pset.gset_prefix, colname);
+ if (VariableHasHook(pset.vars, varname))
+ {
+ psql_error("attempt to \\gset into specially treated variable \"%s\" ignored\n",
+ varname);
+ continue;
+ }
+
if (!PQgetisnull(result, 0, i))
value = PQgetvalue(result, 0, i);
else
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index 120b25c69661..0d28ba9c92bb 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -360,6 +360,32 @@ SetVariableHooks(VariableSpace space, const char *name,
(void) (*ahook) (current->value);
}
+/*
+ * Return true iff the named variable has substitute and/or assign hook
+ * functions.
+ */
+bool
+VariableHasHook(VariableSpace space, const char *name)
+{
+ struct _variable *current;
+
+ Assert(space);
+ Assert(name);
+
+ for (current = space->next; current; current = current->next)
+ {
+ int cmp = strcmp(current->name, name);
+
+ if (cmp == 0)
+ return (current->substitute_hook != NULL ||
+ current->assign_hook != NULL);
+ if (cmp > 0)
+ break; /* it's not there */
+ }
+
+ return false;
+}
+
/*
* Convenience function to set a variable's value to "on".
*/
diff --git a/src/bin/psql/variables.h b/src/bin/psql/variables.h
index 02d85b1bc2e4..8dc5c20ee8fc 100644
--- a/src/bin/psql/variables.h
+++ b/src/bin/psql/variables.h
@@ -90,6 +90,7 @@ bool DeleteVariable(VariableSpace space, const char *name);
void SetVariableHooks(VariableSpace space, const char *name,
VariableSubstituteHook shook,
VariableAssignHook ahook);
+bool VariableHasHook(VariableSpace space, const char *name);
void PsqlVarEnumError(const char *name, const char *value, const char *suggestions);
diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out
index 0c94144575af..1ae81912c734 100644
--- a/src/test/regress/expected/psql.out
+++ b/src/test/regress/expected/psql.out
@@ -84,6 +84,10 @@ select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_
select 10 as "bad name"
\gset
invalid variable name: "bad name"
+select 97 as "EOF", 'ok' as _foo \gset IGNORE
+attempt to \gset into specially treated variable "IGNOREEOF" ignored
+\echo :IGNORE_foo :IGNOREEOF
+ok 0
-- multiple backslash commands in one line
select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x
1
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql
index 4a676c311955..7f8ab2e5c218 100644
--- a/src/test/regress/sql/psql.sql
+++ b/src/test/regress/sql/psql.sql
@@ -48,6 +48,9 @@ select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_
select 10 as "bad name"
\gset
+select 97 as "EOF", 'ok' as _foo \gset IGNORE
+\echo :IGNORE_foo :IGNOREEOF
+
-- multiple backslash commands in one line
select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x
select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y
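The same check can be reproduced from a shell; only the database name is made up, the SQL matches the regression test above:
# A compromised server can no longer use \gset to overwrite specially treated
# psql variables: the column named after IGNOREEOF is skipped with a warning,
# while the ordinary variable IGNORE_foo is still set.
psql -d demodb <<'SQL'
select 97 as "EOF", 'ok' as _foo \gset IGNORE
\echo :IGNORE_foo :IGNOREEOF
SQL
# expected: attempt to \gset into specially treated variable "IGNOREEOF" ignored
#           ok 0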

View File

@ -1,163 +0,0 @@
From eeede2470a8ec902c80de449d2c4822330c689ca Mon Sep 17 00:00:00 2001
From: wang_yue111 <648774160@qq.com>
Date: Fri, 26 Feb 2021 12:57:48 +0800
Subject: [PATCH] Fix mishandling of column-level SELECT privileges for join
aliases.
scanNSItemForColumn, expandNSItemAttrs, and ExpandSingleTable would
pass the wrong RTE to markVarForSelectPriv when dealing with a join
ParseNamespaceItem: they'd pass the join RTE, when what we need to
mark is the base table that the join column came from. The end
result was to not fill the base table's selectedCols bitmap correctly,
resulting in an understatement of the set of columns that are read
by the query. The executor would still insist on there being at
least one selectable column; but with a correctly crafted query,
a user having SELECT privilege on just one column of a table would
nonetheless be allowed to read all its columns.
To fix, make markRTEForSelectPriv fetch the correct RTE for itself,
ignoring the possibly-mismatched RTE passed by the caller. Later,
we'll get rid of some now-unused RTE arguments, but that risks
API breaks so we won't do it in released branches.
This problem was introduced by commit 9ce77d75c, so back-patch
to v13 where that came in. Thanks to Sven Klemm for reporting
the problem.
Security: CVE-2021-20229
---
src/backend/parser/parse_relation.c | 40 ++++++++++-----------
src/backend/parser/parse_target.c | 8 +++--
4 files changed, 91 insertions(+), 22 deletions(-)
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index bbbb0b6..7ab4ba7 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -43,7 +43,8 @@ static RangeTblEntry *scanNameSpaceForRelid(ParseState *pstate, Oid relid,
int location);
static void check_lateral_ref_ok(ParseState *pstate, ParseNamespaceItem *nsitem,
int location);
-static void markRTEForSelectPriv(ParseState *pstate, RangeTblEntry *rte,
+static void markRTEForSelectPriv(ParseState *pstate,
+
int rtindex, AttrNumber col);
static void expandRelation(Oid relid, Alias *eref,
int rtindex, int sublevels_up,
@@ -897,20 +898,15 @@ searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname,
/*
* markRTEForSelectPriv
- * Mark the specified column of an RTE as requiring SELECT privilege
+ * Mark the specified column of the RTE with index rtindex
+ * as requiring SELECT privilege
*
* col == InvalidAttrNumber means a "whole row" reference
- *
- * The caller should pass the actual RTE if it has it handy; otherwise pass
- * NULL, and we'll look it up here. (This uglification of the API is
- * worthwhile because nearly all external callers have the RTE at hand.)
*/
static void
-markRTEForSelectPriv(ParseState *pstate, RangeTblEntry *rte,
- int rtindex, AttrNumber col)
+markRTEForSelectPriv(ParseState *pstate, int rtindex, AttrNumber col)
{
- if (rte == NULL)
- rte = rt_fetch(rtindex, pstate->p_rtable);
+ RangeTblEntry *rte = rt_fetch(rtindex, pstate->p_rtable);
if (rte->rtekind == RTE_RELATION)
{
@@ -942,13 +938,13 @@ markRTEForSelectPriv(ParseState *pstate, RangeTblEntry *rte,
{
int varno = ((RangeTblRef *) j->larg)->rtindex;
- markRTEForSelectPriv(pstate, NULL, varno, InvalidAttrNumber);
+ markRTEForSelectPriv(pstate, varno, InvalidAttrNumber);
}
else if (IsA(j->larg, JoinExpr))
{
int varno = ((JoinExpr *) j->larg)->rtindex;
- markRTEForSelectPriv(pstate, NULL, varno, InvalidAttrNumber);
+ markRTEForSelectPriv(pstate, varno, InvalidAttrNumber);
}
else
elog(ERROR, "unrecognized node type: %d",
@@ -957,13 +953,13 @@ markRTEForSelectPriv(ParseState *pstate, RangeTblEntry *rte,
{
int varno = ((RangeTblRef *) j->rarg)->rtindex;
- markRTEForSelectPriv(pstate, NULL, varno, InvalidAttrNumber);
+ markRTEForSelectPriv(pstate, varno, InvalidAttrNumber);
}
else if (IsA(j->rarg, JoinExpr))
{
int varno = ((JoinExpr *) j->rarg)->rtindex;
- markRTEForSelectPriv(pstate, NULL, varno, InvalidAttrNumber);
+ markRTEForSelectPriv(pstate, varno, InvalidAttrNumber);
}
else
elog(ERROR, "unrecognized node type: %d",
@@ -994,10 +990,10 @@ markRTEForSelectPriv(ParseState *pstate, RangeTblEntry *rte,
/*
* markVarForSelectPriv
- * Mark the RTE referenced by a Var as requiring SELECT privilege
+ * Mark the RTE referenced by the Var as requiring SELECT privilege
+ * for the Var's column (the Var could be a whole-row Var, too)
*
- * The caller should pass the Var's referenced RTE if it has it handy
- * (nearly all do); otherwise pass NULL.
+ * The rte argument is unused and will be removed later.
*/
void
markVarForSelectPriv(ParseState *pstate, Var *var, RangeTblEntry *rte)
@@ -1008,7 +1004,7 @@ markVarForSelectPriv(ParseState *pstate, Var *var, RangeTblEntry *rte)
/* Find the appropriate pstate if it's an uplevel Var */
for (lv = 0; lv < var->varlevelsup; lv++)
pstate = pstate->parentParseState;
- markRTEForSelectPriv(pstate, rte, var->varno, var->varattno);
+ markRTEForSelectPriv(pstate, var->varno, var->varattno);
}
/*
@@ -2629,9 +2625,13 @@ expandRelAttrs(ParseState *pstate, RangeTblEntry *rte,
/*
* Require read access to the table. This is normally redundant with the
* markVarForSelectPriv calls below, but not if the table has zero
- * columns.
+ * columns. We need not do anything if the nsitem is for a join: its
+ * component tables will have been marked ACL_SELECT when they were added
+ * to the rangetable. (This step changes things only for the target
+ * relation of UPDATE/DELETE, which cannot be under a join.)
*/
- rte->requiredPerms |= ACL_SELECT;
+ if (rte->rtekind == RTE_RELATION)
+ rte->requiredPerms |= ACL_SELECT;
forboth(name, names, var, vars)
{
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 64a1b75..c7165cb 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -1328,9 +1328,13 @@ ExpandSingleTable(ParseState *pstate, RangeTblEntry *rte,
/*
* Require read access to the table. This is normally redundant with
* the markVarForSelectPriv calls below, but not if the table has zero
- * columns.
+ * columns. We need not do anything if the nsitem is for a join: its
+ * component tables will have been marked ACL_SELECT when they were
+ * added to the rangetable. (This step changes things only for the
+ * target relation of UPDATE/DELETE, which cannot be under a join.)
*/
- rte->requiredPerms |= ACL_SELECT;
+ if (rte->rtekind == RTE_RELATION)
+ rte->requiredPerms |= ACL_SELECT;
/* Require read access to each column */
foreach(l, vars)
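The regression-test files counted in the diffstat are not part of this excerpt, so the following is only a hedged sketch of the guarantee the fix restores; all role, table and database names are made up, and local trust/peer authentication is assumed for the second connection:
# After the fix, reading a column through a join still requires the matching
# column privilege on the base table; only the explicitly granted column stays
# readable.
psql -d demodb <<'SQL'
CREATE TABLE priv_demo (a int, b int);
CREATE TABLE priv_other (a int);
CREATE ROLE col_reader LOGIN;
GRANT SELECT (a) ON priv_demo TO col_reader;
GRANT SELECT ON priv_other TO col_reader;
SQL
psql -d demodb -U col_reader -c 'SELECT a FROM priv_demo JOIN priv_other USING (a);'           # allowed
psql -d demodb -U col_reader -c 'SELECT priv_demo.b FROM priv_demo JOIN priv_other USING (a);' # rejected
# expected: ERROR:  permission denied for table priv_demo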

View File

@ -1,70 +0,0 @@
From eec462367ee2b41e02c6e29135c857ad6f2da66a Mon Sep 17 00:00:00 2001
From: Michael Paquier <michael@paquier.xyz>
Date: Mon, 26 Aug 2019 11:14:33 +0900
Subject: [PATCH] Fix error handling of vacuumdb when running out of fds
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When trying to use a high number of jobs, vacuumdb has only checked for
a maximum number of jobs used, causing confusing failures when running
out of file descriptors when the jobs open connections to Postgres.
This commit changes the error handling so as we do not check anymore for
a maximum number of allowed jobs when parsing the option value with
FD_SETSIZE, but check instead if a file descriptor is within the
supported range when opening the connections for the jobs so as this is
detected at the earliest time possible.
Also, improve the error message to give a hint about the number of jobs
recommended, using a wording given by the reviewers of the patch.
Reported-by: Andres Freund
Author: Michael Paquier
Reviewed-by: Andres Freund, Álvaro Herrera, Tom Lane
Discussion: https://postgr.es/m/20190818001858.ho3ev4z57fqhs7a5@alap3.anarazel.de
Backpatch-through: 9.5
---
src/bin/scripts/vacuumdb.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index f888bf73bc..4ac765244a 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -200,12 +200,6 @@ main(int argc, char *argv[])
progname);
exit(1);
}
- if (concurrentCons > FD_SETSIZE - 1)
- {
- fprintf(stderr, _("%s: too many parallel jobs requested (maximum: %d)\n"),
- progname, FD_SETSIZE - 1);
- exit(1);
- }
break;
case 2:
maintenance_db = pg_strdup(optarg);
@@ -443,6 +437,20 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
{
conn = connectDatabase(dbname, host, port, username, prompt_password,
progname, echo, false, true);
+
+ /*
+ * Fail and exit immediately if trying to use a socket in an
+ * unsupported range. POSIX requires open(2) to use the lowest
+ * unused file descriptor and the hint given relies on that.
+ */
+ if (PQsocket(conn) >= FD_SETSIZE)
+ {
+ fprintf(stderr,
+ _("%s: too many jobs for this platform -- try %d"),
+ progname, i);
+ exit(1);
+ }
+
init_slot(slots + i, conn);
}
}
--
2.23.0
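The effect is easiest to see from the command line; the job count and connection options below are arbitrary:
# vacuumdb no longer rejects -j values above FD_SETSIZE-1 up front; the run
# only fails if a job's connection lands on a file descriptor >= FD_SETSIZE,
# and the error then hints at how many jobs did fit.
vacuumdb -h db1.example.com -U postgres -j 2048 --all
# possible outcome on a platform with a small FD_SETSIZE:
#   vacuumdb: too many jobs for this platform -- try 1021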

69
Makefile.regress Normal file
View File

@ -0,0 +1,69 @@
#
# Simplified makefile for running the PostgreSQL regression tests
# in an RPM installation
#
# maximum simultaneous connections for parallel tests
MAXCONNOPT =
ifdef MAX_CONNECTIONS
MAXCONNOPT += --max-connections=$(MAX_CONNECTIONS)
endif
# locale
NOLOCALE =
ifdef NO_LOCALE
NOLOCALE += --no-locale
endif
srcdir := .
REGRESS_OPTS += --dlpath=.
pg_regress_locale_flags = $(if $(ENCODING),--encoding=$(ENCODING)) $(NOLOCALE)
pg_regress_installcheck = ./pg_regress --inputdir=$(srcdir) --bindir=@bindir@ $(pg_regress_locale_flags)
# Test input and expected files. These are created by pg_regress itself, so we
# don't have a rule to create them. We do need rules to clean them however.
ifile_list := $(subst .source,, $(notdir $(wildcard $(srcdir)/input/*.source)))
input_files := $(foreach file, $(ifile_list), sql/$(file).sql)
ofile_list := $(subst .source,, $(notdir $(wildcard $(srcdir)/output/*.source)))
output_files := $(foreach file, $(ofile_list), expected/$(file).out)
abs_srcdir := $(shell pwd)
abs_builddir := $(shell pwd)
check: installcheck-parallel
installcheck: cleandirs
$(pg_regress_installcheck) $(REGRESS_OPTS) --schedule=$(srcdir)/serial_schedule $(EXTRA_TESTS)
installcheck-parallel: cleandirs
$(pg_regress_installcheck) $(REGRESS_OPTS) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) $(EXTRA_TESTS)
# The tests command the server to write into testtablespace and results.
# On a SELinux-enabled system this will fail unless we mark those directories
# as writable by the server.
cleandirs:
-rm -rf testtablespace results
mkdir testtablespace results
if test -x /usr/bin/chcon && ! test -f /.dockerenv; then \
/usr/bin/chcon -u system_u -r object_r -t postgresql_db_t testtablespace results ; \
fi
# old interfaces follow...
runcheck: check
runtest: installcheck
runtest-parallel: installcheck-parallel
##
## Clean up
##
clean distclean maintainer-clean:
rm -f $(output_files) $(input_files)
rm -rf testtablespace
rm -rf results tmp_check log
rm -f regression.diffs regression.out regress.out run_check.out
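A short usage sketch for the added Makefile.regress; the install location is an assumption (adjust it to wherever the test subpackage ships pg_regress and the test inputs), and MAX_CONNECTIONS/EXTRA_TESTS/NO_LOCALE are optional:
# Run the installed regression tests against a running server.
cd /usr/lib64/pgsql/test/regress        # assumed path
make -f Makefile.regress installcheck-parallel MAX_CONNECTIONS=5
# serial schedule, plus one extra test:
make -f Makefile.regress installcheck EXTRA_TESTS=numeric_big
# ignore locale-dependent differences:
make -f Makefile.regress check NO_LOCALE=1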

View File

@ -1,50 +0,0 @@
From 57ba539775f1dd0b0460f1dfe673da00eeef3a2f Mon Sep 17 00:00:00 2001
From: Michael Paquier <michael@paquier.xyz>
Date: Tue, 7 May 2019 14:20:01 +0900
Subject: [PATCH] Remove some code related to 7.3 and older servers from tools
of src/bin/
This code was broken as of 582edc3, and is most likely not used anymore.
Note that pg_dump supports servers down to 8.0, and psql has code to
support servers down to 7.4.
Author: Julien Rouhaud
Reviewed-by: Tom Lane
Discussion: https://postgr.es/m/CAOBaU_Y5y=zo3+2gf+2NJC1pvMYPcbRXoQaPXx=U7+C8Qh4CzQ@mail.gmail.com
---
src/bin/scripts/common.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 8073ee0d0e..5088c4c48e 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -145,9 +145,8 @@ connectDatabase(const char *dbname, const char *pghost,
exit(1);
}
- if (PQserverVersion(conn) >= 70300)
- PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL,
- progname, echo));
+ PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL,
+ progname, echo));
return conn;
}
@@ -311,13 +310,6 @@ appendQualifiedRelation(PQExpBuffer buf, const char *spec,
PGresult *res;
int ntups;
- /* Before 7.3, the concept of qualifying a name did not exist. */
- if (PQserverVersion(conn) < 70300)
- {
- appendPQExpBufferStr(&sql, spec);
- return;
- }
-
split_table_columns_spec(spec, PQclientEncoding(conn), &table, &columns);
/*
--
2.23.0

View File

@ -1,76 +0,0 @@
From a1413123f80f470da1ec422592f228aebe4a8866 Mon Sep 17 00:00:00 2001
From: Michael Paquier <michael@paquier.xyz>
Date: Thu, 27 Feb 2020 11:21:14 +0900
Subject: [PATCH 1/2] createdb: Fix quoting of --encoding, --lc-ctype and
--lc-collate
The original coding failed to properly quote those arguments, leading to
failures when using quotes in the values used. As the quoting can be
encoding-sensitive, the connection to the backend needs to be taken
before applying the correct quoting.
Author: Michael Paquier
Reviewed-by: Daniel Gustafsson
Discussion: https://postgr.es/m/20200214041004.GB1998@paquier.xyz
Backpatch-through: 9.5
---
src/bin/scripts/createdb.c | 29 +++++++++++++++++++----------
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c
index 8116b084ff..45d26ecb8c 100644
--- a/src/bin/scripts/createdb.c
+++ b/src/bin/scripts/createdb.c
@@ -177,6 +177,13 @@ main(int argc, char *argv[])
dbname = get_user_name_or_exit(progname);
}
+ /* No point in trying to use postgres db when creating postgres db. */
+ if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0)
+ maintenance_db = "template1";
+
+ conn = connectMaintenanceDatabase(maintenance_db, host, port, username,
+ prompt_password, progname, echo);
+
initPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "CREATE DATABASE %s",
@@ -187,23 +194,25 @@ main(int argc, char *argv[])
if (tablespace)
appendPQExpBuffer(&sql, " TABLESPACE %s", fmtId(tablespace));
if (encoding)
- appendPQExpBuffer(&sql, " ENCODING '%s'", encoding);
+ {
+ appendPQExpBufferStr(&sql, " ENCODING ");
+ appendStringLiteralConn(&sql, encoding, conn);
+ }
if (template)
appendPQExpBuffer(&sql, " TEMPLATE %s", fmtId(template));
if (lc_collate)
- appendPQExpBuffer(&sql, " LC_COLLATE '%s'", lc_collate);
+ {
+ appendPQExpBufferStr(&sql, " LC_COLLATE ");
+ appendStringLiteralConn(&sql, lc_collate, conn);
+ }
if (lc_ctype)
- appendPQExpBuffer(&sql, " LC_CTYPE '%s'", lc_ctype);
+ {
+ appendPQExpBufferStr(&sql, " LC_CTYPE ");
+ appendStringLiteralConn(&sql, lc_ctype, conn);
+ }
appendPQExpBufferChar(&sql, ';');
- /* No point in trying to use postgres db when creating postgres db. */
- if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0)
- maintenance_db = "template1";
-
- conn = connectMaintenanceDatabase(maintenance_db, host, port, username,
- prompt_password, progname, echo);
-
if (echo)
printf("%s\n", sql.data);
result = PQexec(conn, sql.data);
--
2.23.0
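A quick shell illustration of what the quoting fix covers; the locale values and database names are made up:
# Encoding and locale values are now passed through appendStringLiteralConn()
# after the connection is established, so they are quoted according to the
# connection encoding instead of being pasted verbatim into CREATE DATABASE.
createdb --encoding=UTF8 --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" demodb
# --echo prints the generated statement, which makes the quoting easy to inspect:
createdb --echo --template=template0 --lc-collate="C" demo2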

58
generate-pdf.sh Normal file
View File

@ -0,0 +1,58 @@
#! /bin/sh
# This script builds the PDF version of the PostgreSQL documentation.
#
# In principle we could do this as part of the RPM build, but there are
# good reasons not to:
# 1. The build would take longer and have a larger BuildRequires footprint.
# 2. The generated PDF has timestamps in it, which would inevitably result
# in multilib conflicts due to slightly different timestamps.
# So instead, we run this manually when rebasing to a new upstream release,
# and treat the resulting PDF as a separate Source file.
#
# You will need to have the docbook packages installed to run this.
# Expect it to take about 20 minutes and use about 160MB of disk.
set -e
# Pass package version (e.g., 9.1.2) as argument
VERSION=$1
test -z "$VERSION" && VERSION=`awk '/^Version:/ { print $2; }' postgresql.spec`
TARGETFILE=postgresql-$VERSION-US.pdf
test -f "$TARGETFILE" && echo "$TARGETFILE exists" && exit 1
echo Building $TARGETFILE ...
# Unpack postgresql
rm -rf postgresql-$VERSION
tar xfj postgresql-$VERSION.tar.bz2
cd postgresql-$VERSION
# Apply any patches that affect the PDF documentation
# patch -p1 < ../xxx.patch
# Configure ...
./configure >/dev/null
# Build the PDF docs
cd doc/src/sgml
make postgres-US.pdf >make.log
mv -f postgres-US.pdf ../../../../$TARGETFILE
# Clean up
cd ../../../..
rm -rf postgresql-$VERSION
exit 0
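Typical invocation when rebasing to a new upstream release; the version argument is optional and falls back to the Version: field of postgresql.spec:
# Needs postgresql-13.3.tar.bz2 and the docbook toolchain locally; produces
# postgresql-13.3-US.pdf next to the spec file.
sh generate-pdf.sh 13.3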

12
generate-sources.sh Normal file
View File

@ -0,0 +1,12 @@
#! /bin/sh
rm sources
set -e
spectool -S *.spec | cut -d' ' -f2 \
| grep -E -e 'postgresql-.*\.tar\.*' -e 'postgresql.*\.pdf' | sort | \
while read line
do
base=`basename "$line"`
echo " * handling $base"
sha512sum --tag "$base" >> sources
done
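And the companion step that refreshes the checksum list after the tarball or PDF changes:
# Regenerates ./sources with sha512 checksums of the tarballs and PDF
# referenced by the spec file; spectool comes from rpmdevtools.
sh generate-sources.sh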

View File

@ -1,62 +0,0 @@
die() { echo >&2 $"FATAL: $@" ; exit 1 ; }
error() { echo >&2 $"ERROR: $@" ; }
error_q() { echo >&2 $" $@" ; }
warn() { echo >&2 $"WARNING: $@" ; }
warn_q() { echo >&2 $" $@" ; }
info() { echo >&2 $" * $@" ; }
info_q() { echo >&2 $" $@" ; }
debug() { test "$option_debug" != "1" || echo >&2 $"DEBUG: $@"; }
set_var()
{
eval "$1=\"$2\""
}
root_prereq()
{
test -z "$PGSETUP_TEST" || return 0
test "$(id -u)" -eq 0 || die "$0 requires root access for this action"
}
read_config_file()
{
local key="" val=""
test -r "$1" || die "can't read file '$1'"
for i in $2; do
eval "unset __pg_conf_$i"
done
# No easy (and secure) way to read configuration files from bash script,
# sorry.
while read key val; do
[[ $key =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]] || return 1
case " $2 " in
*" $key "*)
;;
*)
warn "config file '$1': unknown key '$key'"
continue
;;
esac
# Strip double quotes
case "$val" in
\"*\")
val=${val##\"}
val=${val%%\"}
;;
esac
# Make it reasonably safe. Keep the dollar sign escaped.
eval "__pg_conf_$key=\$val"
done < <(grep -v -e "^$" -e "^#" < "$1")
}
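A minimal sketch of how read_config_file is meant to be called; the config file path and the key names here are hypothetical:
# Source the helpers, then read two whitelisted "key value" pairs.
. /usr/share/postgresql-setup/library.sh
read_config_file /etc/postgresql-setup/demo.conf "PGDATA PGPORT"
echo "data directory: $__pg_conf_PGDATA, port: $__pg_conf_PGPORT"
# keys in the file that are not in the whitelist only produce:
#   WARNING: config file '/etc/postgresql-setup/demo.conf': unknown key '...'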

View File

@ -1,9 +0,0 @@
# Macros from this file should exist on the system only once, so it is the package
# maintainer's responsibility to guarantee that all packages which install
# these macros collide, or that (during package-build time) only one such
# package is installed (e.g. similarly to %%scl macro defined installed by
# *-build subpackage).
%postgresql_major 10
%postgresql_module_requires Requires: postgresql-server(:MODULE_COMPAT_10)

View File

@ -1,18 +0,0 @@
# Initialize the PostgreSQL tests environment. This is supposed to be invoked
# in prep/build/install (where you plan to run the testsuite), while it defines
# several useful shell variables and provides useful commands. The effect of this
# command ends with shell exit.
%postgresql_tests_init . "/usr/share/postgresql-setup/postgresql_pkg_tests.sh"
# Start the testing postgresql server, setting the actual unix user to be the
# PostgreSQL admin. The $PGHOST/$PGPORT are set appropriately, so psql is able
# to run without passwords. This also sets shell exit trap callback so that if
# something goes wrong and package build fails, the temporary database is
# automatically cleaned up.
%postgresql_tests_start pgtests_start
# Initialize database and run server in one step, if this works well-enough.
%postgresql_tests_run %postgresql_tests_init ; %postgresql_tests_start
%pgtests_init echo 2>&1 WARN: macro pgtests_init will be removed, use postgresql_tests_init ; %postgresql_tests_init
%pgtests_start echo 2>&1 WARN: macro pgtests_start will be removed, use postgresql_tests_start ; %postgresql_tests_start

View File

@ -1 +0,0 @@
6c8e616c91a45142b85c0aeb1f29ebba4a361309e86469e0fb4617b6a73c4011 postgresql-10.5.tar.bz2

View File

@ -0,0 +1 @@
8490741f47c88edc8b6624af009ce19fda4dc9b31c4469ce2551d84075d5d995 postgresql-12.7.tar.bz2

BIN
postgresql-13.3-US.pdf Normal file

Binary file not shown.

BIN
postgresql-13.3.tar.bz2 Normal file

Binary file not shown.

View File

@ -0,0 +1 @@
3cd9454fa8c7a6255b6743b767700925ead1b9ab0d7a0f9dcb1151010f8eb4a1 postgresql-13.3.tar.bz2

View File

@ -1,104 +0,0 @@
#!/bin/bash
# This script verifies that the postgresql data directory has been correctly
# initialized. We do not want to automatically initdb it, because that has
# a risk of catastrophic failure (ie, overwriting a valuable database) in
# corner cases, such as a remotely mounted database on a volume that's a
# bit slow to mount. But we can at least emit a message advising newbies
# what to do.
if test -z "$1"; then
echo "Maintainer error: $0 must be run with one argument"
exit 1
fi
service_name="$1"
if [ -z "$PGDATA" ]; then
echo $"You try to start '$service_name' service"
echo $"but the required \$PGDATA environment variable is not set."
if test 0 -eq 1; then
echo $"You should use the /etc/sysconfig/pgsql/$service_name"
else
echo $"You should use the /etc/systemd/system/$service_name.service.d/ANYTHING.conf"
fi
echo $"configuration file to set \$PGDATA. For more info see"
echo $"the /usr/share/doc/postgresql/README.rpm-dist file."
exit 1
fi
# Warn the user that the configuration should be adjusted via drop-in, in case
# the $PGDATA variable is set different way (and non-default service name is
# used, systemd systems only).
conf_dir="/etc/systemd/system/$service_name.service.d"
if test 0 = 0 && [[ "$service_name" == *@* ]] && test ! -d "$conf_dir"; then
echo >&2 $"WARNING: Note that the '$conf_dir'"
echo >&2 $"directory does not exist while you are using non-default service"
echo >&2 $"name '$service_name'. You are encouraged to use the"
echo >&2 $"$conf_dir directory for systemd configuration according"
echo >&2 $"to /usr/share/doc/postgresql/README.rpm-dist documentation."
fi
# Full PostgreSQL version, e.g. 9.0.2
PGVERSION=10.5
# Major version of PostgreSQL, e.g. 9.0
PGMAJORVERSION=10
# Distribution README file
README_DIST=/usr/share/doc/postgresql/README.rpm-dist
bad_version()
{
local real_version="$1"
. "/usr/share/postgresql-setup/library.sh"
while read id version; do
test "$version" = "$real_version" || continue
local cmd="postgresql-setup --upgrade"
test "postgresql" = "$id" \
|| cmd="$cmd --upgrade-from $id"
echo $"An old version of the database format was found."
echo $"Use '$cmd' to upgrade to version '$PGMAJORVERSION'"
echo $"See $README_DIST for more information."
return
done < <(parse_upgrade_setup list)
echo $"An old version '$real_version' of the database format was found."
echo $"You need to dump and reload before using PostgreSQL $PGVERSION."
echo $"See $README_DIST for more information."
return
}
# Check for the PGDATA structure
if [ -f "$PGDATA/PG_VERSION" ] && [ -d "$PGDATA/base" ]
then
real_version=`cat "$PGDATA/PG_VERSION"`
# Check version of existing PGDATA
if [ x"$real_version" = x"$PGMAJORVERSION" ]
then
: A-OK
else
bad_version "$real_version"
exit 1
fi
else
# No existing PGDATA! Warn the user to initdb it.
echo $"Directory \"$PGDATA\" is missing or empty."
echo $"Use \"/usr/bin/postgresql-setup --initdb\""
echo $"to initialize the database cluster."
echo $"See $README_DIST for more information."
exit 1
fi
exit 0
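The script is normally run by the systemd unit before server start, but it can be exercised by hand; the installed path used below is an assumption, and 'postgresql' is the default service name:
# With a missing or empty data directory the script explains what to do and
# exits non-zero, which keeps the unit from starting a broken cluster.
PGDATA=/var/lib/pgsql/data /usr/libexec/postgresql-check-db-dir postgresql    # assumed path
# typical output on a fresh system:
#   Directory "/var/lib/pgsql/data" is missing or empty.
#   Use "/usr/bin/postgresql-setup --initdb"
#   to initialize the database cluster.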

View File

@ -0,0 +1,99 @@
From 0edaa982336823d4d7af8f10b91579fe0099ef3d Mon Sep 17 00:00:00 2001
From: Tom Stellard <tstellar@redhat.com>
Date: Tue, 20 Apr 2021 20:14:21 -0700
Subject: [PATCH] jit: Workaround potential datalayout mismatch on s390x
LLVM's s390x target uses a different datalayout for z13 and newer processors.
If llvmjit_types.bc is compiled to target a processor older than z13, and
then the JIT runs on a z13 or newer processor, then there will be a mismatch
in datalayouts between llvmjit_types.bc and the JIT engine. This mismatch
causes the JIT to fail at runtime.
---
src/backend/jit/llvm/llvmjit.c | 46 ++++++++++++++++++++++++++++++++--
1 file changed, 44 insertions(+), 2 deletions(-)
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index 98a27f08bf..05b6438ba8 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -776,6 +776,35 @@ llvm_compile_module(LLVMJitContext *context)
errhidecontext(true)));
}
+/*
+ * For the systemz target, LLVM uses a different datalayout for z13 and newer
+ * CPUs than it does for older CPUs. This can cause a mismatch in datalayouts
+ * in the case where the llvm_types_module is compiled with a pre-z13 CPU
+ * and the JIT is running on z13 or newer.
+ * See computeDataLayout() function in
+ * llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp for information on the
+ * datalayout differences.
+ */
+static bool
+needs_systemz_workaround(void)
+{
+ bool ret = false;
+ LLVMContextRef llvm_context;
+ LLVMTypeRef vec_type;
+ LLVMTargetDataRef llvm_layoutref;
+ if (strncmp(LLVMGetTargetName(llvm_targetref), "systemz", strlen("systemz")))
+ {
+ return false;
+ }
+
+ llvm_context = LLVMGetModuleContext(llvm_types_module);
+ vec_type = LLVMVectorType(LLVMIntTypeInContext(llvm_context, 32), 4);
+ llvm_layoutref = LLVMCreateTargetData(llvm_layout);
+ ret = (LLVMABIAlignmentOfType(llvm_layoutref, vec_type) == 16);
+ LLVMDisposeTargetData(llvm_layoutref);
+ return ret;
+}
+
/*
* Per session initialization.
*/
@@ -785,6 +814,7 @@ llvm_session_initialize(void)
MemoryContext oldcontext;
char *error = NULL;
char *cpu = NULL;
+ char *host_features = NULL;
char *features = NULL;
LLVMTargetMachineRef opt0_tm;
LLVMTargetMachineRef opt3_tm;
@@ -816,10 +846,17 @@ llvm_session_initialize(void)
* features not all CPUs have (weird, huh).
*/
cpu = LLVMGetHostCPUName();
- features = LLVMGetHostCPUFeatures();
+ features = host_features = LLVMGetHostCPUFeatures();
elog(DEBUG2, "LLVMJIT detected CPU \"%s\", with features \"%s\"",
cpu, features);
+ if (needs_systemz_workaround())
+ {
+ const char *no_vector =",-vector";
+ features = malloc(sizeof(char) * (strlen(host_features) + strlen(no_vector) + 1));
+ sprintf(features, "%s%s", host_features, no_vector);
+ }
+
opt0_tm =
LLVMCreateTargetMachine(llvm_targetref, llvm_triple, cpu, features,
LLVMCodeGenLevelNone,
@@ -833,8 +870,13 @@ llvm_session_initialize(void)
LLVMDisposeMessage(cpu);
cpu = NULL;
- LLVMDisposeMessage(features);
+ if (features != host_features)
+ {
+ free(features);
+ }
features = NULL;
+ LLVMDisposeMessage(host_features);
+ host_features = NULL;
/* force symbols in main binary to be loaded */
LLVMLoadLibraryPermanently(NULL);
--
2.27.0

View File

@ -0,0 +1,43 @@
We don't build/install interfaces by upstream's implicit rules.
This patch is used in two places: postgresql.spec and libecpg.spec -- keep those
in sync!
Related: rhbz#1618698
diff --git a/src/Makefile b/src/Makefile
index bcdbd95..4bea236 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -20,7 +20,6 @@ SUBDIRS = \
backend/utils/mb/conversion_procs \
backend/snowball \
include \
- interfaces \
backend/replication/libpqwalreceiver \
backend/replication/pgoutput \
fe_utils \
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index b9d86ac..29df69f 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -549,7 +549,7 @@ endif
# How to link to libpq. (This macro may be used as-is by backend extensions.
# Client-side code should go through libpq_pgport or libpq_pgport_shlib,
# instead.)
-libpq = -L$(libpq_builddir) -lpq
+libpq = -lpq
# libpq_pgport is for use by client executables (not libraries) that use libpq.
# We force clients to pull symbols from the non-shared libraries libpgport
@@ -579,7 +579,6 @@ endif
# Commonly used submake targets
submake-libpq: | submake-generated-headers
- $(MAKE) -C $(libpq_builddir) all
submake-libpgport: | submake-generated-headers
$(MAKE) -C $(top_builddir)/src/port all
--
2.21.0

41
postgresql-logging.patch Normal file
View File

@ -0,0 +1,41 @@
Default to stderr-based logging with a week's worth of daily logfiles.
diff -Naur postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample
--- postgresql-9.1rc1.orig/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 17:23:13.000000000 -0400
+++ postgresql-9.1rc1/src/backend/utils/misc/postgresql.conf.sample 2011-08-18 18:39:39.697526799 -0400
@@ -279,7 +279,7 @@
# requires logging_collector to be on.
# This is used when logging to stderr:
-#logging_collector = off # Enable capturing of stderr and csvlog
+logging_collector = on # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
@@ -355,11 +355,11 @@
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
-#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
+log_filename = 'postgresql-%a.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
-#log_truncate_on_rotation = off # If on, an existing log file with the
+log_truncate_on_rotation = on # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
@@ -367,9 +367,9 @@
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
-#log_rotation_age = 1d # Automatic rotation of logfiles will
+log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
-#log_rotation_size = 10MB # Automatic rotation of logfiles will
+log_rotation_size = 0 # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
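With these defaults a cluster keeps at most one log file per weekday and truncates it when that weekday comes around again; a quick check, assuming the default PGDATA and the default log_directory of 'log':
# Seven files at most, named after the abbreviated weekday (%a); size-based
# rotation is disabled (log_rotation_size = 0).
ls /var/lib/pgsql/data/log
#   postgresql-Mon.log  postgresql-Tue.log  postgresql-Wed.log  ...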

49
postgresql-man.patch Normal file
View File

@ -0,0 +1,49 @@
PostgreSQL ecpg/initdb manual page fixes
This was generated based on automatic Red Hat manual page scan (private
RHBZ#948933).
diff -up postgresql-13.1/doc/src/sgml/man1/ecpg.1.patch6 postgresql-13.1/doc/src/sgml/man1/ecpg.1
--- postgresql-13.1/doc/src/sgml/man1/ecpg.1.patch6 2020-11-09 23:38:03.000000000 +0100
+++ postgresql-13.1/doc/src/sgml/man1/ecpg.1 2020-11-18 09:26:40.547324791 +0100
@@ -81,6 +81,11 @@ ORACLE\&.
Define a C preprocessor symbol\&.
.RE
.PP
+\fB\-h \fR
+.RS 4
+Parse a header file, this option includes option \fB\-c\fR\&.
+.RE
+.PP
\fB\-h\fR
.RS 4
Process header files\&. When this option is specified, the output file extension becomes
@@ -144,6 +149,11 @@ Allow question mark as placeholder for c
.RE
.RE
.PP
+\fB\-\-regression\fR
+.RS 4
+Run in regression testing mode\&.
+.RE
+.PP
\fB\-t\fR
.RS 4
Turn on autocommit of transactions\&. In this mode, each SQL command is automatically committed unless it is inside an explicit transaction block\&. In the default mode, commands are committed only when
diff -up postgresql-13.1/doc/src/sgml/man1/initdb.1.patch6 postgresql-13.1/doc/src/sgml/man1/initdb.1
--- postgresql-13.1/doc/src/sgml/man1/initdb.1.patch6 2020-11-09 23:38:05.000000000 +0100
+++ postgresql-13.1/doc/src/sgml/man1/initdb.1 2020-11-18 09:25:05.082348424 +0100
@@ -311,6 +311,13 @@ determines that an error prevented it fr
.PP
Other options:
.PP
+\fB\-s\fR
+.br
+\fB\-\-show\fR
+.RS 4
+Print the internal settings, then exit\&.
+.RE
+.PP
\fB\-V\fR
.br
\fB\-\-version\fR

View File

@ -0,0 +1,12 @@
diff -up postgresql-13.1/src/interfaces/Makefile.patch10 postgresql-13.1/src/interfaces/Makefile
--- postgresql-13.1/src/interfaces/Makefile.patch10 2021-02-02 21:33:23.235292305 +0100
+++ postgresql-13.1/src/interfaces/Makefile 2021-02-02 21:33:30.281365440 +0100
@@ -12,7 +12,7 @@ subdir = src/interfaces
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
-SUBDIRS = libpq ecpg
+SUBDIRS = libpq
$(recurse)

View File

@ -0,0 +1,58 @@
We should ideally provide '/bin/pg_config' in postgresql-server-devel, and
provide no pg_config binary in the libpq package. But most of the Fedora
packages that use pg_config actually only build against PG libraries (and
postgresql-server-devel isn't needed). So, to avoid the initial rush around
the rhbz#1618698 change, we rather provide a pg_server_config binary, which in
turn means that we'll have to fix only a minimal set of packages which really
build PostgreSQL server modules.
diff -up postgresql-13.1/src/bin/pg_config/Makefile.patch9 postgresql-13.1/src/bin/pg_config/Makefile
--- postgresql-13.1/src/bin/pg_config/Makefile.patch9 2020-11-18 09:28:30.885453275 +0100
+++ postgresql-13.1/src/bin/pg_config/Makefile 2020-11-18 09:31:33.926325327 +0100
@@ -11,6 +11,8 @@
PGFILEDESC = "pg_config - report configuration information"
PGAPPICON=win32
+PG_CONFIG = pg_server_config$(X)
+
subdir = src/bin/pg_config
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
@@ -19,22 +21,22 @@ OBJS = \
$(WIN32RES) \
pg_config.o
-all: pg_config
+all: $(PG_CONFIG)
-pg_config: $(OBJS) | submake-libpgport
- $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X)
+$(PG_CONFIG): $(OBJS) | submake-libpgport
+ $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@
install: all installdirs
- $(INSTALL_SCRIPT) pg_config$(X) '$(DESTDIR)$(bindir)/pg_config$(X)'
+ $(INSTALL_SCRIPT) $(PG_CONFIG) '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
installdirs:
$(MKDIR_P) '$(DESTDIR)$(bindir)'
uninstall:
- rm -f '$(DESTDIR)$(bindir)/pg_config$(X)'
+ rm -f '$(DESTDIR)$(bindir)/$(PG_CONFIG)'
clean distclean maintainer-clean:
- rm -f pg_config$(X) $(OBJS)
+ rm -f $(PG_CONFIG) $(OBJS)
rm -rf tmp_check
check:
diff -up postgresql-13.1/src/bin/pg_config/nls.mk.patch9 postgresql-13.1/src/bin/pg_config/nls.mk
--- postgresql-13.1/src/bin/pg_config/nls.mk.patch9 2020-11-18 09:28:30.885453275 +0100
+++ postgresql-13.1/src/bin/pg_config/nls.mk 2020-11-18 09:32:00.735599526 +0100
@@ -1,4 +1,4 @@
# src/bin/pg_config/nls.mk
-CATALOG_NAME = pg_config
+CATALOG_NAME = pg_server_config
AVAIL_LANGUAGES = cs de es fr he it ja ko pl pt_BR ru sv tr uk vi zh_CN
GETTEXT_FILES = pg_config.c ../../common/config_info.c ../../common/exec.c
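A sketch of how a package that really builds PostgreSQL server modules would consume the renamed binary; 'my_extension' is hypothetical and its Makefile is assumed to follow the usual PGXS pattern (PG_CONFIG overridable):
pg_server_config --includedir-server                                # locate the server headers
make -C my_extension PG_CONFIG=/usr/bin/pg_server_config install    # build/install against them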

View File

@ -1,777 +0,0 @@
#!/bin/bash
#
# postgresql-setup - Initialization and upgrade operations for PostgreSQL
if test "$(id -u)" -eq 0; then
cmd=
for v in PGSETUP_DEBUG PGSETUP_INITDB_OPTIONS PGSETUP_PGUPGRADE_OPTIONS; do
eval var_content=\$$v
test -z "$var_content" && continue
cmd+=$v="$(printf %q "$var_content") "
done
cmd+=$(printf %q "$(readlink -f "$0")")
for arg; do cmd+=" $(printf %q "$arg")" ; done
# Drop root privileges ASAP. Running the postgresql-setup script as root is
# not recommended nowadays, so we take the liberty of switching to the
# PostgreSQL admin user (by default 'postgres') without any other option.
exec /usr/sbin/runuser -s /bin/sh -l postgres -c "$cmd"
fi
# ensure privacy
umask 0077
: ${RESTORECON=/sbin/restorecon}
test -x $RESTORECON || RESTORECON=:
test -z "$PATH" && export PATH="/sbin:/usr/sbin:/bin:/usr/bin"
test x"$PGSETUP_DEBUG" != x && set -x && PS4='${LINENO}: '
# The current user name.
USER=$(id -u -n)
# Directory containing the postmaster executable
PGENGINE=/usr/bin
# Distribution README file
README_DIST=/usr/share/doc/postgresql/README.rpm-dist
# Home directory of postgres user
POSTGRES_HOMEDIR=/var/lib/pgsql
# The port where the PostgreSQL server listens by default
PGPORT_DEF=5432
. "/usr/share/postgresql-setup/library.sh"
:
# We upgrade by default from system's default PostgreSQL installation
option_upgradefrom="postgresql"
srvsuff=
test 0 -eq 0 && srvsuff=".service"
USAGE_STRING=$"\
Usage: $0 MODE_OPTION [--unit=UNIT_NAME] [OPTION...]
This script is aimed at helping the sysadmin with basic database cluster
administration. Usually, \"postgresql-setup --initdb\" and
\"postgresql-setup --upgrade\" are enough; however, there are other options
described below.
For more info on how and when to use this script, please look at the
documentation file $README_DIST.
Available operation modes:
--initdb Initialize new PostgreSQL database cluster. This is usually the
first action you perform after PostgreSQL server installation.
--upgrade Upgrade database cluster for new major version of PostgreSQL
server. See the --upgrade-from option for more info.
Options:
--unit=UNIT_NAME The UNIT_NAME is used to select proper unit
configuration (unit == service or initscript name
on non-systemd systems). For example, if you want
to work with unit called
'postgresql@com_example.service', you should use
'postgresql@com_example' (without trailing .service
string). When no UNIT_NAME is explicitly passed,
the 'postgresql' string is used by default.
--port=PORT port where the initialized server will listen for
connections"
test 0 -eq 0 && \
USAGE_STRING+="
--new-systemd-unit We dropped this option for security reasons.
Nowadays, please use the root-only script
/usr/sbin/postgresql-new-systemd-unit.
--datadir Dropped with --new-systemd-unit."
USAGE_STRING+="
--upgrade-from-unit=UNIT Select proper unit name to upgrade from. This
has similar semantics as --unit option.
--upgrade-ids Print list of available IDs of upgrade scenarios to
standard output.
--upgrade-from=ID Specify the ID of the \"old\" postgresql stack to
upgrade from. Available IDs can be listed with
--upgrade-ids. Default is '$option_upgradefrom'.
Other options:
--help show this help
--version show version of this package
--debug show basic debugging information
Environment:
PGSETUP_INITDB_OPTIONS Options carried by this variable are passed to
subsequent call of \`initdb\` binary (see man
initdb(1)). This variable is used also during
'upgrade' mode because the new cluster is actually
re-initialized from the old one.
PGSETUP_PGUPGRADE_OPTIONS Options in this variable are passed next to the
subsequent call of \`pg_upgrade\`. For more info
about possible options please look at man
pg_upgrade(1).
PGSETUP_DEBUG Set to '1' if you want to see very verbose shell
debugging output."
print_version()
{
echo "postgresql-setup 8.2"
echo $"Built against PostgreSQL version 10.5."
}
check_not_initialized()
{
if test -f "$pgdata/PG_VERSION"; then
error $"Data directory $pgdata is not empty!"
return 1
fi
return 0
}
# code shared between initdb and upgrade actions
perform_initdb()
{
if [ ! -e "$pgdata" ]; then
mkdir "$pgdata" || return 1
fi
$RESTORECON "$pgdata"
test -w "$pgdata" || die "$pgdata is not writeable by $USER"
# Clean up SELinux tagging for pgdata
[ -x /sbin/restorecon ] && /sbin/restorecon "$pgdata"
# Create the initdb log file if needed
if [ ! -e "$initdb_log" ]; then
touch "$initdb_log" || return 1
fi
$RESTORECON "$initdb_log"
test -w "$initdb_log" || echo "$initdb_log is not writeable by $USER"
# Initialize the database
initdbcmd+=( "$PGENGINE"/initdb --pgdata="$pgdata" --auth=ident )
eval "initdbcmd+=( $PGSETUP_INITDB_OPTIONS )"
"${initdbcmd[@]}" >> "$initdb_log" 2>&1 < /dev/null
# Create directory for postmaster log files
mkdir "$pgdata/pg_log"
$RESTORECON "$pgdata/pg_log"
# This if-fork is just to not unnecessarily overwrite what upstream
# generates by initdb (upstream implicitly uses PGPORT_DEF).
if test "$pgport" != "$PGPORT_DEF"; then
local pgconf="$pgdata/postgresql.conf"
sed -i "s|^[[:space:]#]*port[[:space:]]=[^#]*|port = $pgport |g" \
"$pgconf" \
&& grep "^port = " "$pgconf" >/dev/null
if test $? -ne 0; then
error "can not change port in $pgdata/postgresql.conf"
return 1
fi
fi
test -f "$pgdata/PG_VERSION"
}
initdb()
{
port_info=
test "$pgport" != "$PGPORT_DEF" \
&& port_info=$", listening on port '$pgport'"
info $"Initializing database in '$pgdata'$port_info"
if check_not_initialized && perform_initdb; then
info $"Initialized, logs are in ${initdb_log}"
else
error $"Initializing database failed, possibly see $initdb_log"
script_result=1
fi
}
old_data_in_use()
{
local pidfile="$pgdataold/postmaster.pid"
test -f "$pidfile" || return 1
error $"The pidfile '$pidfile' exists. Verify that there is no postmaster"
error_q $"running in the $pgdataold directory."
}
upgrade()
{
local inplace=false
test "$pgdata" = "$upgradefrom_data" && inplace=true
debug "running inplace upgrade: $inplace"
# must see previous version in PG_VERSION
local old_data_version="`cat "$upgradefrom_data/PG_VERSION"`"
if [ ! -f "$upgradefrom_data/PG_VERSION" -o \
x"$old_data_version" != x"$upgradefrom_major" ]
then
error $"Cannot upgrade because the database in $upgradefrom_data is of"
error_q $"version $old_data_version but it should be $upgradefrom_major"
exit 1
fi
if [ ! -x "$upgradefrom_engine/postgres" ]; then
error $"Please install the $upgradefrom_package package."
exit 5
fi
if [ ! -x "$PGENGINE/pg_upgrade" ]; then
# In newer packaging "$PGENGINE/postgres" depends transitively on the
# pg_upgrade binary, but the SCL with PostgreSQL 9.4 ships a separate
# '*-upgrade' package that contains `pg_upgrade`. We need to have this
# installed, too. Keep this check as long as {rh,sclo}-postgresql94 is
# still a thing.
error $"Please install the postgresql-upgrade package."
exit 5
fi
# Set up log file for pg_upgrade
rm -f "$upgrade_log"
touch "$upgrade_log" || die "can't write into $upgrade_log file"
$RESTORECON "$upgrade_log"
# Move old DB to pgdataold
if $inplace; then
pgdataold="${pgdata}-old"
rm -rf "$pgdataold"
mv "$pgdata" "$pgdataold" || exit 1
else
pgdataold="$upgradefrom_data"
fi
# Create configuration file for upgrade process
HBA_CONF_BACKUP="$pgdataold/pg_hba.conf.postgresql-setup.`date +%s`"
HBA_CONF_BACKUP_EXISTS=0
if [ ! -f $HBA_CONF_BACKUP ]; then
mv "$pgdataold/pg_hba.conf" "$HBA_CONF_BACKUP"
HBA_CONF_BACKUP_EXISTS=1
# For a smooth upgrade the 'postgres' user should be able to connect
# to any database without a password. Temporarily, no other type
# of connection is needed.
echo "local all postgres ident" > "$pgdataold/pg_hba.conf"
$RESTORECON "$pgdataold"
fi
info $"Upgrading database."
scls_upgrade_hacks=
test -n "$upgradefrom_scls" && {
debug "scls [$upgradefrom_scls] will be enabled"
scls_upgrade_hacks="source scl_source enable $upgradefrom_scls"
}
test x"$upgradefrom_redhat_sockets_hack" = xyes && {
debug "upgrading from redhat server"
socket_hacks="export REDHAT_PGUPGRADE_FROM_RHEL=yes"
}
test -n "$upgradefrom_pghost_override" && {
pghost_override="export PGHOST='$upgradefrom_pghost_override'"
}
local failure_cleanup=true
if old_data_in_use; then
script_result=1
# Cleanup makes sense once perform_initdb gets called.
failure_cleanup=false
elif ! check_not_initialized; then
# Don't try to re-init initialized data directory and also do not
# remove it after this unsuccessful upgrade.
script_result=1
failure_cleanup=false
elif perform_initdb; then
$inplace && link_option=--link
# After creating the empty new-format database, do the upgrade
(
cd # pg_upgrade writes to $PWD
eval "
$scls_upgrade_hacks
$socket_hacks
$pghost_override
"
eval "add_options=( $PGSETUP_PGUPGRADE_OPTIONS )"
"$PGENGINE"/pg_upgrade \
--old-bindir="$upgradefrom_engine" \
--new-bindir="$PGENGINE" \
--old-datadir="$pgdataold" \
--new-datadir="$pgdata" \
$link_option \
--old-port="$PGPORT" \
--new-port="$PGPORT" \
--username=postgres \
"${add_options[@]}" \
>>"$upgrade_log" 2>>"$upgrade_log"
)
if [ $? -ne 0 ]; then
# pg_upgrade failed
error $"pg_upgrade tool failed"
script_result=1
fi
else
error $"initdb failed"
script_result=1
fi
# Move back the backed-up pg_hba.conf regardless of the script_result.
if [ x$HBA_CONF_BACKUP_EXISTS = x1 ]; then
mv -f "$HBA_CONF_BACKUP" "$pgdataold/pg_hba.conf"
fi
if [ $script_result -eq 0 ]; then
info $"Upgraded OK."
warn $"The configuration files were replaced by default configuration."
warn $"The previous configuration and data are stored in folder"
warn $pgdataold.
else
# Clean up after failure.
$failure_cleanup && rm -rf "$pgdata"
$inplace && mv "$pgdataold" "$pgdata"
error $"Upgrade failed."
fi
info $"See $upgrade_log for details."
}
check_daemon_reload()
{
local nr_option=NeedDaemonReload
test 0 = 1 && return 0
local nr_out="`systemctl show -p $nr_option $option_service.service 2>/dev/null`"
if [[ "$nr_out" != "$nr_option=no" ]]; then
error $"Note that systemd configuration for '$option_service' changed."
error_q $"You need to perform 'systemctl daemon-reload' otherwise the"
error_q $"results of this script can be inadequate."
exit 1
fi
}
handle_service_env()
{
local service="$1"
local systemd_env="$(systemctl show -p Environment "${service}.service")" \
|| { return; }
for env_var in `echo "$systemd_env" | sed 's/^Environment=//'`; do
# If one variable name is defined multiple times the last definition wins.
case "$env_var" in
PGDATA=*)
unit_pgdata="${env_var##PGDATA=}"
debug "unit's datadir: '$unit_pgdata'"
;;
PGPORT=*)
unit_pgport="${env_var##PGPORT=}"
debug "unit's pgport: $unit_pgport"
;;
esac
done
}
handle_envfile()
{
local file="$1"
debug "trying to read '$file' env file"
if test ! -r "$file"; then
if test 0 = 1; then
return
fi
error "Can not read EnvironmentFile '$file' specified"
error_q "in ${service}.service"
fi
# Note that the env file parser in systemd does not perform exactly the
# same job.
unset PGPORT PGDATA
. "$file"
envfile_pgdata="$PGDATA"
envfile_pgport="$PGPORT"
unset PGPORT PGDATA
}
handle_service_envfiles()
{
local mode="$1"
local service="$2"
local envfiles="$(systemctl show -p EnvironmentFiles "${service}.service")"\
|| return
test -z "$envfiles" && return
envfiles=$(echo $envfiles | \
sed -e 's/^EnvironmentFile=//' \
-e 's| ([^)]*)$||'
)
# Read the file names line-by-line (spaces may be inside)
while read line; do
handle_envfile "$line"
done <<<"$envfiles"
}
handle_pgconf()
{
local datadir="$1"
local conffile="$datadir/postgresql.conf"
debug "postgresql.conf: $conffile"
test -r "$conffile" || {
error "config file $conffile is not readable or does not exist"
die "Old cluster in '$data' does not seem to be initialized"
}
local sp='[[:space:]]'
local sed_expr="s/^$sp*port$sp*=$sp\([0-9]\+\).*/\1/p"
rv=0
conf_pgport=`sed -n "$sed_expr" $conffile | tail -1` || rv=1
test -n "$conf_pgport" && debug "postgresql.conf pgport: $conf_pgport"
return $rv
}
service_configuration()
{
local data=
local port=
local unit_pgport=
local unit_pgdata=
local envfile_pgport=
local envfile_pgdata=
# 'mode' is 'initdb' or 'upgrade'. Basically, if called with mode=initdb, we
# parse configuration of the current (maybe already configured) service.
# When run with mode=upgrade, we try to parse the configuration of the old
# PostgreSQL configuration that we try to upgrade from.
local mode="$1" datavar="$2" portvar="$3" service="$4"
debug "running service_configuration() for $service, mode: $mode"
if test "0" = 1; then
# Sysvinit has the default PGDATA (for the default unit name only)
# configured directly in the initscript, so no additional configuration
# needs to exist. Set the default value of pgdata here to match what's in
# the initscript for the cases when no additional configuration file exists.
# This is done to avoid parsing the whole initscript (for the real value)
# and mainly to not fail in the logic following the 'service_configuration'
# call, where we usually want to report an error that pgdata is not defined.
# Don't set the default pgdata for the upgrade case, however, as we must
# upgrade only from an already properly configured & working stack (missing
# pgdata here is a good reason to die later).
test initdb = "$mode" && test "$service" = "postgresql" \
&& set_var "$datavar" "/var/lib/pgsql/data"
handle_envfile "/etc/sysconfig/pgsql/$service"
else
# We ship two service files, postgresql.service and
# postgresql@.service. The former has PGDATA set by default
# similarly to sysvinit case.
handle_service_env "$service"
handle_service_envfiles "$option_mode" "$service"
fi
# EnvironmentFile beats Environment configuration in systemd. In sysvinit
# there is no "unit_pgdata". So make sure the envfile_pgdata is applied later
# than unit_pgdata.
test -n "$unit_pgdata" && set_var "$datavar" "$unit_pgdata"
test -n "$envfile_pgdata" && set_var "$datavar" "$envfile_pgdata"
# skip for the first run
test initdb = "$mode" && return
set_var data "\$$datavar"
handle_pgconf "$data"
test -n "$conf_pgport" && set_var "$portvar" "$conf_pgport"
test -n "$unit_pgport" && set_var "$portvar" "$unit_pgport"
test -n "$envfile_pgport" && set_var "$portvar" "$envfile_pgport"
}
# <Compat>
# Allow users to use the old-style arguments like
# 'postgresql-setup initdb $SERVICE_NAME'.
case "$1" in initdb|upgrade)
action="--$1"
shift
warn "using obsoleted argument syntax, try --help"
old_long_args="help,usage,version,debug"
oldargs=`getopt -o "" -l "$old_long_args" -n "old-options" -- "$@"` \
|| die "can't parse old arguments"
eval set -- "$oldargs"
additional_opts=
while true; do
case "$1" in
--version|--help|--usage|--debug)
additional_opts="$additional_opts $1"
shift
;;
--)
shift
break
;;
esac
done
service="postgresql"
if test -n "$1"; then
service=$1
shift
fi
set -- $additional_opts "$action" --unit "$service" "$@"
warn "arguments transformed to: ${0##*/} $*"
esac
# </Compat>
# postgresql-setup arguments are parsed into those variables
option_mode=none
option_service="postgresql"
option_port=
option_debug=0
option_upgradefrom_unit=
# Content of EnvironmentFile= files fills those:
envfile_pgdata=
envfile_pgport=
# Configuration from (/etc/systemd/system/$option_service.service) fills those
# variables.
unit_pgdata=
unit_pgport=
# Configuration from postgresql.conf:
conf_pgport=
# Key variables. Try to fill them from postgresql.conf, the Environment=
# statement in the service file, or the EnvironmentFile= content (the
# later-mentioned sources take priority).
pgdata=default
pgport=default
## PARSE SCRIPT ARGUMENTS ##
short_opts=""
long_opts="\
initdb,upgrade,\
new-systemd-unit,upgrade-ids,\
unit:,service:,port:,datadir:,upgrade-from:,upgrade-from-unit:,\
debug,\
version,help,usage"
args=`getopt -o "$short_opts" -l "$long_opts" -n "postgresql-setup" -- "$@"` \
|| die "can't parse arguments"
eval set -- "$args"
parse_fail=0
while true; do
case "$1" in
--initdb|--upgrade)
if test "$option_mode" != none; then
error "bad argument $1, mode already specified: --$option_mode"
parse_fail=1
else
option_mode=${1##--}
fi
shift
;;
--unit|--service)
option_service=$2
shift 2
;;
--port)
option_port=$2
shift 2
;;
--datadir|--new-systemd-unit)
error $"Removed option --new-systemd-unit/--datadir, please use"
error_q $"/usr/sbin/postgresql-new-systemd-unit script"
exit 1
;;
--debug)
option_debug=1
shift
;;
--help|--usage)
echo "$USAGE_STRING"
exit 0
;;
--upgrade-from)
option_upgradefrom="$2"
shift 2
;;
--upgrade-from-unit)
option_upgradefrom_unit="$2"
shift 2
;;
--version)
print_version
exit 0
;;
--)
shift
break
;;
*)
die "author's fault: option $1 not handled"
break
;;
esac
done
test $parse_fail -ne 0 && die "can't parse arguments"
test "$option_mode" = none \
&& die "no mode specified, use --initdb or --upgrade, or --help"
## GATHER THE SETUP FIRST ##
initdb_log="$POSTGRES_HOMEDIR/initdb_${option_service}.log"
upgrade_log="$POSTGRES_HOMEDIR/upgrade_${option_service}.log"
debug "mode used: $option_mode"
debug "service name: $option_service"
# load service's pgdata
service_configuration initdb pgdata UNUSED "$option_service"
test "$pgdata" = default \
&& die $"no db datadir (PGDATA) configured for '$option_service$srvsuff' unit"
[[ "$pgdata" =~ ^/.* ]] \
|| die $"the PostgreSQL datadir is not an absolute path: '$pgdata', try --debug"
## GATHER DATA FROM INITIALIZED DATADIR ##
test -n "$option_port" && pgport=$option_port
if test upgrade = "$option_mode"; then
upgradefrom_data="$upgradefrom_data_default"
if test -z "$option_upgradefrom_unit"; then
if test "postgresql" = "postgresql"; then
# Fedora usecase -> upgrade while keeping the same name of
# service/unit.
option_upgradefrom_unit=$option_service
else
# PGRPMs/RHSCL usecase -> we upgrade from one service/unit name to
# a different one, e.g. from postgresql92 to postgresql93, or from
# postgresql (system version) to postgresql94 (scl).
option_upgradefrom_unit=$upgradefrom_id
# Try to predict situations: postgresql93@second -> postgresql94@second
if [[ "$option_service" =~ ^postgresql@(.*)$ ]]; then
option_upgradefrom_unit="$upgradefrom_id@${BASH_REMATCH[1]}"
fi
fi
fi
test "$option_service" = "$option_upgradefrom_unit" \
|| info "upgrading from '$option_upgradefrom_unit$srvsuff'" \
"to '$option_service$srvsuff'"
service_configuration upgrade upgradefrom_data pgport \
"$option_upgradefrom_unit"
test -n "$option_port" -a "$option_port" != "$pgport" \
&& warn "Old pgport $pgport has higher priority than the --port value."
fi
# For an upgrade we expect that the previous stack was in a working state
# (thus running on the default port).
test "$option_mode" = upgrade -a "$pgport" = default \
&& pgport=$PGPORT_DEF
# This is mostly for 'initdb'. We assume that the default port is $PGPORT_DEF
# if not set explicitly for default service name 'postgresql'.
if test "$pgport" = default -a "$option_service" = "postgresql"; then
debug $"Using the default port '$PGPORT_DEF'"
pgport=$PGPORT_DEF
fi
if test "$pgport" = default; then
# initdb case.. Note that this may be called by initscripts. If this gets
# called by legacy script, we can't help too much because systemd does not
# allow passing additional arguments to 'service XX initdb' command.
die $"For non-default unit names you must specify port by --port option."
fi
[[ "$option_port" =~ ^[0-9]*$ ]] \
|| die $"port set to '$option_port', must be integer number"
## LAST CHECK THE SETUP ##
check_daemon_reload
# These variables are read by the underlying utilities, so export them.
export PGDATA=$pgdata
export PGPORT=$pgport
debug "final pgdata: $pgdata"
debug "final pgport: $pgport"
script_result=0
test -w "/var/lib/pgsql" || {
# pg_upgrade binary needs to have write-able $PWD (and we use 'su -')
error $"The /var/lib/pgsql directory has wrong permissions."
error_q $"Please make sure the directory is writable by $USER."
exit 1
}
if /usr/bin/mountpoint -q "$pgdata" || /usr/bin/mountpoint -q "$(dirname "$pgdata")"; then
warn $"Note that either your data directory '$pgdata' or"
warn_q $"the parent directory '$(dirname "$pgdata")'"
warn_q $"is a direct mountpoint. This is usually a bad idea and your"
warn_q $"filesystem layout should ideally look like:"
warn_q $"/ROOT_OWNED_MOUNTPOINT/POSTGRES_OWNED_DIRECTORY/DATADIR."
warn_q $"See the upstream documentation for more info:"
warn_q $"http://www.postgresql.org/docs/10/static/creating-cluster.html"
fi
# See how we were called.
case "$option_mode" in
initdb)
initdb
;;
upgrade)
upgrade
;;
*)
echo >&2 "$USAGE_STRING"
exit 2
esac
exit $script_result
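For context, the removed script above was typically driven as follows; a minimal sketch based on its own usage string (the unit name and the extra options are only examples):
PGSETUP_INITDB_OPTIONS="--data-checksums" postgresql-setup --initdb --unit postgresql
PGSETUP_PGUPGRADE_OPTIONS="--jobs=4" postgresql-setup --upgrade --unit postgresql
postgresql-setup initdb postgresql   # obsolete syntax, rewritten by the compat shim to: --initdb --unit postgresql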

BIN
postgresql-setup-8.5.tar.gz Normal file

Binary file not shown.

View File

@ -42,12 +42,12 @@ diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
 index e278fa0..9ee15d4 100644
 --- a/src/include/pg_config_manual.h
 +++ b/src/include/pg_config_manual.h
-@@ -169,7 +169,7 @@
- * here's where to twiddle it. You can also override this at runtime
- * with the postmaster's -k switch.
+@@ -201,7 +201,7 @@
+ * support them yet.
  */
+ #ifndef WIN32
 -#define DEFAULT_PGSOCKET_DIR "/tmp"
 +#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql"
-
- /*
- * This is the default event source for Windows event log.
+ #else
+ #define DEFAULT_PGSOCKET_DIR ""
+ #endif
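With the default socket directory moved as above, a client that should not rely on the compiled-in default can name the directory explicitly; a sketch (standard port assumed):
psql -h /var/run/postgresql -p 5432 -U postgres -l   # connect via the packaged socket directory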

3
postgresql.pam Normal file
View File

@ -0,0 +1,3 @@
#%PAM-1.0
auth include password-auth
account include password-auth

View File

@ -1,23 +0,0 @@
[Unit]
Description=PostgreSQL database server
After=network.target
[Service]
Type=notify
User=postgres
Group=postgres
OOMScoreAdjust=-1000
Environment=PG_OOM_ADJUST_FILE=/proc/self/oom_score_adj
Environment=PG_OOM_ADJUST_VALUE=0
Environment=PGDATA=/var/lib/pgsql/data
ExecStartPre=/usr/libexec/postgresql-check-db-dir %N
ExecStart=/usr/bin/postmaster -D ${PGDATA}
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
TimeoutSec=0
[Install]
WantedBy=multi-user.target
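The removed unit above hard-codes PGDATA=/var/lib/pgsql/data. A common way to point such a unit at another location is a systemd drop-in; a sketch with an example path:
systemctl edit postgresql.service          # create a drop-in containing, e.g.:
                                           #   [Service]
                                           #   Environment=PGDATA=/srv/pgsql/data
systemctl daemon-reload                    # postgresql-setup refuses to run while NeedDaemonReload=yes
systemctl enable --now postgresql.service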

File diff suppressed because it is too large.

View File

@ -1,193 +0,0 @@
#! /bin/sh
# Do some "integration" testing against a running PostgreSQL server.
# This file is to be sourced.
try_random_port ()
{
_port=$RANDOM
if test -n "$_port" && eval '_port=$(( $_port + 32000 ))' 2>/dev/null; then
echo "$_port"
elif test -x /usr/bin/shuf; then
/usr/bin/shuf -r -n 1 -i 32000-64767
else
echo 54321
fi
}
: ${PGTESTS_DATADIR=`pwd`/datadir}
: ${PGTESTS_ADMIN=`id -u -n`}
: ${PGTESTS_ADMINDB=$PGTESTS_ADMIN}
: ${PGTESTS_ADMINPASS=$PGTESTS_ADMIN}
: ${PGTESTS_PORT=`try_random_port`}
: ${PGTESTS_SOCKETDIR=/tmp}
: ${PGTESTS_USERS=test:test}
: ${PGTESTS_DATABASES=test:test}
# Stop the old cluster and/or remove its data.
: ${PGTESTS_STARTCLEANUP=:}
# Cleanup once we exit the script.
: ${PGTESTS_CLEANUP=:}
export PGPORT=$PGTESTS_PORT
export PGHOST=$PGTESTS_SOCKETDIR
:
warning ()
{
echo >&2 " ! $*"
}
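# __TRAP_ACTIONS accumulates cleanup callbacks (newest first); __trap_cb runs
# them all when the shell exits.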
__trap_cb ()
{
IFS=' '
for __func in $__TRAP_ACTIONS
do
$__func
done
}
trap __trap_cb EXIT
__pgtests_initdb ()
{
initdb "$PGTESTS_DATADIR" -U "$PGTESTS_ADMIN" \
--auth-local=peer --auth-host=md5 \
${PGTESTS_LOCALE+--locale="$PGTESTS_LOCALE"}
}
__pgtests_start ()
{
pg_ctl -D "$PGTESTS_DATADIR" -l "$PGTESTS_DATADIR"/start.log start \
-o "-k $PGTESTS_SOCKETDIR -p $PGTESTS_PORT" -w || {
cat >&2 "$PGTESTS_DATADIR"/start.log
false
}
}
__pgtests_create_admins_db ()
{
createdb -h "$PGTESTS_SOCKETDIR" "$PGTESTS_ADMINDB" --owner "$PGTESTS_ADMIN" -p "$PGTESTS_PORT"
}
__pgtests_passwd()
{
psql -d postgres --set=user="$1" --set=pass="$2" -tA \
<<<"ALTER USER :\"user\" WITH ENCRYPTED PASSWORD :'pass';"
}
pgtests_start ()
{
unset __TRAP_ACTIONS
if $PGTESTS_STARTCLEANUP; then
# We don't plan to be serious here. This pgtests_* effort is just to
# ease _testing_ against a running postgresql server without too much
# typing.
if test -f "$PGTESTS_DATADIR"/postmaster.pid; then
# Give it a try.
warning "Seems like server works, trying to stop."
pg_ctl stop -D "$PGTESTS_DATADIR" -w
fi
# Cleanup testing directory
if test -e "$PGTESTS_DATADIR"; then
warning "Removing old data directory."
rm -r "$PGTESTS_DATADIR"
fi
fi
__pgtests_initdb
__TRAP_ACTIONS="pgtests_cleanup $__TRAP_ACTIONS"
__pgtests_start
__TRAP_ACTIONS="pgtests_stop $__TRAP_ACTIONS"
__pgtests_create_admins_db
__pgtests_passwd "$PGTESTS_ADMIN" "$PGTESTS_ADMINPASS"
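# PGTESTS_USERS holds 'name:password' pairs; create each role and set its password.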
for _pgt_user in $PGTESTS_USERS
do
save_IFS=$IFS
IFS=:
_user=
_pass=
for _part in $_pgt_user
do
if test -z "$_user"; then
_user=$_part
else
_pass=$_part
fi
done
createuser "$_user"
__pgtests_passwd "$_user" "$_pass"
IFS=$save_IFS
done
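# PGTESTS_DATABASES holds 'owner:dbname' pairs; create each database for its owner.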
for _pgt_db in $PGTESTS_DATABASES
do
save_IFS=$IFS
IFS=:
_db=
_user=
for _part in $_pgt_db
do
if test -z "$_user"; then
_user=$_part
else
_db=$_part
fi
done
createdb "$_db" --owner "$_user"
IFS=$save_IFS
done
}
__clean_trap_action ()
{
__new_actions=
for __action in $__TRAP_ACTIONS
do
if test "$__action" = "$1"; then
:
else
__new_actions="$__action $__new_actions"
fi
done
__TRAP_ACTIONS=$__new_actions
}
pgtests_cleanup ()
{
if $PGTESTS_CLEANUP && $PGTESTS_AUTOSTOP; then
rm -r "$PGTESTS_DATADIR"
fi
__clean_trap_action pgtests_cleanup
}
pgtests_stop ()
{
if $PGTESTS_AUTOSTOP; then
pg_ctl stop -D "$PGTESTS_DATADIR" -w
fi
__clean_trap_action pgtests_stop
}
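A minimal sketch of driving the helper above from a test script; the helper's file name is assumed, since the diff view does not show it:
export PGTESTS_USERS="alice:secret"        # role:password
export PGTESTS_DATABASES="alice:alicedb"   # owner:dbname
. ./pgtests-helper.sh                      # assumed file name
pgtests_start                              # initdb + start on a random port, create roles/DBs
psql -c 'SELECT version();'                # admin connection over $PGHOST/$PGPORT
pgtests_stop                               # stop now; the EXIT trap would do it anyway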