!14 Small loop unrolling in O2.

From: @github-27907959 
Reviewed-by: @li-yancheng, @eastb233 
Signed-off-by: @eastb233
openeuler-ci-bot 2022-12-07 01:57:29 +00:00 committed by Gitee
commit eb34b0fb56
3 changed files with 723 additions and 1 deletion

0005-Enable-small-loop-unrolling-for-O2.patch

@@ -0,0 +1,481 @@
From 6c977a4e458eab0dd7684b143baf72240b96fda8 Mon Sep 17 00:00:00 2001
From: Hongyu Wang <hongyu.wang@intel.com>
Date: Thu, 8 Sep 2022 16:52:02 +0800
Subject: [PATCH 4/5] Enable small loop unrolling for O2

Modern processors have multi-way instruction decoders. For x86,
icelake/zen3 can handle 5 uops per cycle, so for a small loop with <= 4
instructions (usually 3 uops, with a cmp/jmp pair that can be
macro-fused) the decoder has a 2-uop bubble on each iteration and the
pipeline cannot be fully utilized.

Therefore, this patch enables loop unrolling for small loops at O2 to
fill the decoder as much as possible. It turns on rtl loop unrolling
when targetm.loop_unroll_adjust exists, only at O2 and above when
optimizing for speed. In the x86 backend the default behavior is to
unroll small loops of at most 4 insns once (an unroll factor of 2).

This improves 548.exchange2 by 9% on icelake and 7.4% on zen3 with a
0.9% code-size increase. For other benchmarks the variations are minor
and overall code size increased by 0.2%. The kernel image size increased
by 0.06%, and there is no impact on eembc.
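
As an illustration of the kind of loop this targets, here is a
hypothetical example (not part of the patch; whether a given loop stays
under the 4-insn limit depends on the RTL that earlier passes produce):

/* small_loop.c -- hypothetical example.  Built with a patched compiler:
       gcc -O2 -S small_loop.c
   The loop body is only a few instructions plus a cmp/jmp pair that can
   macro-fuse, so it is a candidate for the conservative factor-2
   unrolling added by this patch; with an unpatched compiler, plain -O2
   does not run the rtl unroller at all.  */

long
horner (const long *c, long n, long x)
{
  long r = 0;
  for (long i = 0; i < n; i++)
    r = r * x + c[i];   /* loop-carried dependence keeps this scalar */
  return r;
}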
gcc/ChangeLog:
* common/config/i386/i386-common.cc (ix86_optimization_table):
Enable small loop unroll at O2 by default.
* config/i386/i386.cc (ix86_loop_unroll_adjust): Adjust unroll
factor if -munroll-only-small-loops enabled and -funroll-loops/
-funroll-all-loops are disabled.
* config/i386/i386.h (struct processor_costs): Add 2 fields,
small_unroll_ninsns and small_unroll_factor.
* config/i386/i386.opt: Add -munroll-only-small-loops.
* doc/invoke.texi: Document -munroll-only-small-loops.
* loop-init.cc (pass_rtl_unroll_loops::gate): Enable rtl
loop unrolling at O2 and above when optimizing for speed,
if the target hook loop_unroll_adjust exists.
(pass_rtl_unroll_loops::execute): Set UAP_UNROLL flag
when target hook loop_unroll_adjust exists.
* config/i386/x86-tune-costs.h: Update all processor costs
with small_unroll_ninsns = 4 and small_unroll_factor = 2.
gcc/testsuite/ChangeLog:
* gcc.dg/guality/loop-1.c: Add additional option
-mno-unroll-only-small-loops.
* gcc.target/i386/pr86270.c: Add -mno-unroll-only-small-loops.
* gcc.target/i386/pr93002.c: Likewise.
---
gcc/common/config/i386/i386-common.cc | 1 +
gcc/config/i386/i386.cc | 18 ++++++++
gcc/config/i386/i386.h | 5 +++
gcc/config/i386/i386.opt | 4 ++
gcc/config/i386/x86-tune-costs.h | 56 +++++++++++++++++++++++++
gcc/doc/invoke.texi | 11 ++++-
gcc/loop-init.cc | 10 +++--
gcc/testsuite/gcc.dg/guality/loop-1.c | 2 +
gcc/testsuite/gcc.target/i386/pr86270.c | 2 +-
gcc/testsuite/gcc.target/i386/pr93002.c | 2 +-
10 files changed, 105 insertions(+), 6 deletions(-)
diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
index 07fdd045f30..e1c1fb07d8a 100644
--- a/gcc/common/config/i386/i386-common.cc
+++ b/gcc/common/config/i386/i386-common.cc
@@ -1687,6 +1687,7 @@ static const struct default_options ix86_option_optimization_table[] =
/* The STC algorithm produces the smallest code at -Os, for x86. */
{ OPT_LEVELS_2_PLUS, OPT_freorder_blocks_algorithm_, NULL,
REORDER_BLOCKS_ALGORITHM_STC },
+ { OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_munroll_only_small_loops, NULL, 1 },
/* Turn off -fschedule-insns by default. It tends to make the
problem with not enough registers even worse. */
{ OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index b16df5b183e..39b2468799c 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -23561,6 +23561,24 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop)
unsigned i;
unsigned mem_count = 0;
+ /* Unroll small size loop when unroll factor is not explicitly
+ specified. */
+ if (!(flag_unroll_loops
+ || flag_unroll_all_loops
+ || loop->unroll))
+ {
+ nunroll = 1;
+
+ /* Any explicit -f{no-}unroll-{all-}loops turns off
+ -munroll-only-small-loops. */
+ if (ix86_unroll_only_small_loops
+ && !OPTION_SET_P (flag_unroll_loops)
+ && loop->ninsns <= ix86_cost->small_unroll_ninsns)
+ nunroll = ix86_cost->small_unroll_factor;
+
+ return nunroll;
+ }
+
if (!TARGET_ADJUST_UNROLL)
return nunroll;
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index a61c32b8957..421801111a7 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -219,6 +219,11 @@ struct processor_costs {
const char *const align_jump; /* Jump alignment. */
const char *const align_label; /* Label alignment. */
const char *const align_func; /* Function alignment. */
+
+ const unsigned small_unroll_ninsns; /* Insn count limit for small loop
+ to be unrolled. */
+ const unsigned small_unroll_factor; /* Unroll factor for small loop to
+ be unrolled. */
};
extern const struct processor_costs *ix86_cost;
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index a6b0e28f238..3d369647bf7 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -1214,3 +1214,7 @@ Do not use GOT to access external symbols.
-param=x86-stlf-window-ninsns=
Target Joined UInteger Var(x86_stlf_window_ninsns) Init(64) Param
Instructions number above which STFL stall penalty can be compensated.
+
+munroll-only-small-loops
+Target Var(ix86_unroll_only_small_loops) Init(0) Save
+Enable conservative small loop unrolling.
diff --git a/gcc/config/i386/x86-tune-costs.h b/gcc/config/i386/x86-tune-costs.h
index 017ffa69958..b4303e4e971 100644
--- a/gcc/config/i386/x86-tune-costs.h
+++ b/gcc/config/i386/x86-tune-costs.h
@@ -135,6 +135,8 @@ struct processor_costs ix86_size_cost = {/* costs for tuning for size */
NULL, /* Jump alignment. */
NULL, /* Label alignment. */
NULL, /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* Processor costs (relative to an add) */
@@ -244,6 +246,8 @@ struct processor_costs i386_cost = { /* 386 specific costs */
"4", /* Jump alignment. */
NULL, /* Label alignment. */
"4", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs i486_memcpy[2] = {
@@ -354,6 +358,8 @@ struct processor_costs i486_cost = { /* 486 specific costs */
"16", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs pentium_memcpy[2] = {
@@ -462,6 +468,8 @@ struct processor_costs pentium_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static const
@@ -563,6 +571,8 @@ struct processor_costs lakemont_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
@@ -679,6 +689,8 @@ struct processor_costs pentiumpro_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs geode_memcpy[2] = {
@@ -786,6 +798,8 @@ struct processor_costs geode_cost = {
NULL, /* Jump alignment. */
NULL, /* Label alignment. */
NULL, /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs k6_memcpy[2] = {
@@ -896,6 +910,8 @@ struct processor_costs k6_cost = {
"32:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"32", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* For some reason, Athlon deals better with REP prefix (relative to loops)
@@ -1007,6 +1023,8 @@ struct processor_costs athlon_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* K8 has optimized REP instruction for medium sized blocks, but for very
@@ -1127,6 +1145,8 @@ struct processor_costs k8_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
@@ -1255,6 +1275,8 @@ struct processor_costs amdfam10_cost = {
"32:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"32", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* BDVER has optimized REP instruction for medium sized blocks, but for
@@ -1376,6 +1398,8 @@ const struct processor_costs bdver_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"11", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
@@ -1529,6 +1553,8 @@ struct processor_costs znver1_cost = {
"16", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* ZNVER2 has optimized REP instruction for medium sized blocks, but for
@@ -1686,6 +1712,8 @@ struct processor_costs znver2_cost = {
"16", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
struct processor_costs znver3_cost = {
@@ -1818,6 +1846,8 @@ struct processor_costs znver3_cost = {
"16", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* skylake_cost should produce code tuned for Skylake familly of CPUs. */
@@ -1942,6 +1972,8 @@ struct processor_costs skylake_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* icelake_cost should produce code tuned for Icelake family of CPUs.
@@ -2068,6 +2100,8 @@ struct processor_costs icelake_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* alderlake_cost should produce code tuned for alderlake family of CPUs. */
@@ -2188,6 +2222,8 @@ struct processor_costs alderlake_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* BTVER1 has optimized REP instruction for medium sized blocks, but for
@@ -2301,6 +2337,8 @@ const struct processor_costs btver1_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"11", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs btver2_memcpy[2] = {
@@ -2411,6 +2449,8 @@ const struct processor_costs btver2_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"11", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs pentium4_memcpy[2] = {
@@ -2520,6 +2560,8 @@ struct processor_costs pentium4_cost = {
NULL, /* Jump alignment. */
NULL, /* Label alignment. */
NULL, /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs nocona_memcpy[2] = {
@@ -2632,6 +2674,8 @@ struct processor_costs nocona_cost = {
NULL, /* Jump alignment. */
NULL, /* Label alignment. */
NULL, /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs atom_memcpy[2] = {
@@ -2742,6 +2786,8 @@ struct processor_costs atom_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs slm_memcpy[2] = {
@@ -2852,6 +2898,8 @@ struct processor_costs slm_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs tremont_memcpy[2] = {
@@ -2976,6 +3024,8 @@ struct processor_costs tremont_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
static stringop_algs intel_memcpy[2] = {
@@ -3086,6 +3136,8 @@ struct processor_costs intel_cost = {
"16:8:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* Generic should produce code tuned for Core-i7 (and newer chips)
@@ -3205,6 +3257,8 @@ struct processor_costs generic_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
/* core_cost should produce code tuned for Core familly of CPUs. */
@@ -3331,5 +3385,7 @@ struct processor_costs core_cost = {
"16:11:8", /* Jump alignment. */
"0:0:8", /* Label alignment. */
"16", /* Func alignment. */
+ 4, /* Small unroll limit. */
+ 2, /* Small unroll factor. */
};
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 9ac7f89ebb1..1961cafa2bb 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -1448,7 +1448,8 @@ See RS/6000 and PowerPC Options.
-mgeneral-regs-only -mcall-ms2sysv-xlogues -mrelax-cmpxchg-loop @gol
-mindirect-branch=@var{choice} -mfunction-return=@var{choice} @gol
-mindirect-branch-register -mharden-sls=@var{choice} @gol
--mindirect-branch-cs-prefix -mneeded -mno-direct-extern-access}
+-mindirect-branch-cs-prefix -mneeded -mno-direct-extern-access @gol
+-munroll-only-small-loops}
@emph{x86 Windows Options}
@gccoptlist{-mconsole -mcygwin -mno-cygwin -mdll @gol
@@ -33157,6 +33158,14 @@ treat access to protected symbols as local symbols. The default is
@option{-mno-direct-extern-access} and executable compiled with
@option{-mdirect-extern-access} may not be binary compatible if
protected symbols are used in shared libraries and executable.
+
+@item -munroll-only-small-loops
+@opindex munroll-only-small-loops
+@opindex mno-unroll-only-small-loops
+Controls conservative small loop unrolling. It is default enabled by
+O2, and unrolls loop with less than 4 insns by 1 time. Explicit
+-f[no-]unroll-[all-]loops would disable this flag to avoid any
+unintended unrolling behavior that user does not want.
@end table
@node x86 Windows Options
diff --git a/gcc/loop-init.cc b/gcc/loop-init.cc
index 1e4f6cfd7fb..84336865ef7 100644
--- a/gcc/loop-init.cc
+++ b/gcc/loop-init.cc
@@ -565,9 +565,12 @@ public:
{}
/* opt_pass methods: */
- virtual bool gate (function *)
+ virtual bool gate (function * fun)
{
- return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll);
+ return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll
+ || (targetm.loop_unroll_adjust
+ && optimize >= 2
+ && optimize_function_for_speed_p (fun)));
}
virtual unsigned int execute (function *);
@@ -583,7 +586,8 @@ pass_rtl_unroll_loops::execute (function *fun)
if (dump_file)
df_dump (dump_file);
- if (flag_unroll_loops)
+ if (flag_unroll_loops
+ || targetm.loop_unroll_adjust)
flags |= UAP_UNROLL;
if (flag_unroll_all_loops)
flags |= UAP_UNROLL_ALL;
diff --git a/gcc/testsuite/gcc.dg/guality/loop-1.c b/gcc/testsuite/gcc.dg/guality/loop-1.c
index 1b1f6d32271..a32ea445a3f 100644
--- a/gcc/testsuite/gcc.dg/guality/loop-1.c
+++ b/gcc/testsuite/gcc.dg/guality/loop-1.c
@@ -1,5 +1,7 @@
/* { dg-do run } */
/* { dg-options "-fno-tree-scev-cprop -fno-tree-vectorize -g" } */
+/* { dg-additional-options "-mno-unroll-only-small-loops" { target ia32 } } */
+
#include "../nop.h"
diff --git a/gcc/testsuite/gcc.target/i386/pr86270.c b/gcc/testsuite/gcc.target/i386/pr86270.c
index 81841ef5bd7..cbc9fbb0450 100644
--- a/gcc/testsuite/gcc.target/i386/pr86270.c
+++ b/gcc/testsuite/gcc.target/i386/pr86270.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2" } */
+/* { dg-options "-O2 -mno-unroll-only-small-loops" } */
int *a;
long len;
diff --git a/gcc/testsuite/gcc.target/i386/pr93002.c b/gcc/testsuite/gcc.target/i386/pr93002.c
index 0248fcc00a5..f75a847f75d 100644
--- a/gcc/testsuite/gcc.target/i386/pr93002.c
+++ b/gcc/testsuite/gcc.target/i386/pr93002.c
@@ -1,6 +1,6 @@
/* PR target/93002 */
/* { dg-do compile } */
-/* { dg-options "-O2" } */
+/* { dg-options "-O2 -mno-unroll-only-small-loops" } */
/* { dg-final { scan-assembler-not "cmp\[^\n\r]*-1" } } */
volatile int sink;
--
2.18.2
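
As a side note (not part of either commit), one way to observe the first
patch's effect is GCC's rtl pass dump for the unroller; the file name and
loop below are made up for illustration:

/* dump_demo.c -- hypothetical.  With the patched compiler,
       gcc -O2 -fdump-rtl-loop2_unroll -S dump_demo.c
   writes a dump_demo.c.*r.loop2_unroll dump describing the unrolling
   decisions; with an unpatched compiler at plain -O2 the loop2_unroll
   pass is gated off and does not run.  Whether and how far the loop
   below is unrolled still depends on the target cost tables and on what
   earlier passes make of it.  */

unsigned int state;

void
mix (int n)
{
  for (int i = 0; i < n; i++)
    state = state * 1103515245u + 12345u;  /* small, non-vectorizable body */
}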

0006-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch

@@ -0,0 +1,231 @@
From 5c07825ca0c34dd946a8cfc0325ddb452d7f65c5 Mon Sep 17 00:00:00 2001
From: Hongyu Wang <hongyu.wang@intel.com>
Date: Sat, 19 Nov 2022 09:38:00 +0800
Subject: [PATCH 5/5] i386: Only enable small loop unrolling in backend [PR
107692]

Following the discussion in PR107692, -munroll-only-small-loops does
not turn -funroll-loops on or off, and the current check in
pass_rtl_unroll_loops::gate would keep -fno-unroll-loops from taking
effect. Revert the change to targetm.loop_unroll_adjust and apply the
option change in the backend instead, so that -funroll-loops strictly
takes full control of loop unrolling and -munroll-only-small-loops only
changes its behavior to unroll small loops.
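
To summarize the intended flag interaction after this follow-up (a
sketch based on the description above and on the option handling in the
diff below, not an exhaustive statement of behavior; the file name is
made up):

/* options_demo.c -- hypothetical example.
     gcc -O2 -c options_demo.c
         -funroll-loops and -munroll-only-small-loops are both on by
         default, so only loops under the small-loop insn limit are
         unrolled, by the small unroll factor from the cost tables.
     gcc -O2 -fno-unroll-loops -c options_demo.c
         the explicit flag now wins: the rtl unroll pass does not run.
     gcc -O2 -funroll-loops -c options_demo.c
         an explicit -funroll-loops switches -munroll-only-small-loops
         off (unless that flag was also given explicitly) and re-enables
         -fweb / -frename-registers, i.e. classic aggressive unrolling.  */

void
scale (double *a, int n, double k)
{
  for (int i = 0; i < n; i++)   /* a loop for the unroller to consider */
    a[i] *= k;
}
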
gcc/ChangeLog:
PR target/107692
* common/config/i386/i386-common.cc (ix86_optimization_table):
Enable loop unrolling at O2, disable -fweb and -frename-registers
by default.
* config/i386/i386-options.cc
(ix86_override_options_after_change):
Disable small loop unrolling when -funroll-loops is enabled; reset
cunroll_grow_size when it is not explicitly enabled.
(ix86_option_override_internal): Call
ix86_override_options_after_change instead of calling
ix86_recompute_optlev_based_flags and ix86_default_align
separately.
* config/i386/i386.cc (ix86_loop_unroll_adjust): Adjust unroll
factor if -munroll-only-small-loops enabled.
* loop-init.cc (pass_rtl_unroll_loops::gate): Do not enable
loop unrolling solely for O2 when optimizing for speed.
(pass_rtl_unroll_loops::execute): Remove the
targetm.loop_unroll_adjust check.
gcc/testsuite/ChangeLog:
PR target/107692
* gcc.dg/guality/loop-1.c: Remove additional option for ia32.
* gcc.target/i386/pr86270.c: Add -fno-unroll-loops.
* gcc.target/i386/pr93002.c: Likewise.
---
gcc/common/config/i386/i386-common.cc | 8 ++++++
gcc/config/i386/i386-options.cc | 34 ++++++++++++++++++++++---
gcc/config/i386/i386.cc | 18 ++++---------
gcc/loop-init.cc | 11 +++-----
gcc/testsuite/gcc.dg/guality/loop-1.c | 2 --
gcc/testsuite/gcc.target/i386/pr86270.c | 2 +-
gcc/testsuite/gcc.target/i386/pr93002.c | 2 +-
7 files changed, 49 insertions(+), 28 deletions(-)
diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
index e1c1fb07d8a..5e777849f91 100644
--- a/gcc/common/config/i386/i386-common.cc
+++ b/gcc/common/config/i386/i386-common.cc
@@ -1687,7 +1687,15 @@ static const struct default_options ix86_option_optimization_table[] =
/* The STC algorithm produces the smallest code at -Os, for x86. */
{ OPT_LEVELS_2_PLUS, OPT_freorder_blocks_algorithm_, NULL,
REORDER_BLOCKS_ALGORITHM_STC },
+
+ /* Turn on -funroll-loops with -munroll-only-small-loops to enable small
+ loop unrolling at -O2. */
+ { OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_funroll_loops, NULL, 1 },
{ OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_munroll_only_small_loops, NULL, 1 },
+ /* Turns off -frename-registers and -fweb which are enabled by
+ funroll-loops. */
+ { OPT_LEVELS_ALL, OPT_frename_registers, NULL, 0 },
+ { OPT_LEVELS_ALL, OPT_fweb, NULL, 0 },
/* Turn off -fschedule-insns by default. It tends to make the
problem with not enough registers even worse. */
{ OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index 32cc58a764b..b853ff55825 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -1816,8 +1816,37 @@ ix86_recompute_optlev_based_flags (struct gcc_options *opts,
void
ix86_override_options_after_change (void)
{
+ /* Default align_* from the processor table. */
ix86_default_align (&global_options);
+
ix86_recompute_optlev_based_flags (&global_options, &global_options_set);
+
+ /* Disable unrolling small loops when there's explicit
+ -f{,no}unroll-loop. */
+ if ((OPTION_SET_P (flag_unroll_loops))
+ || (OPTION_SET_P (flag_unroll_all_loops)
+ && flag_unroll_all_loops))
+ {
+ if (!OPTION_SET_P (ix86_unroll_only_small_loops))
+ ix86_unroll_only_small_loops = 0;
+ /* Re-enable -frename-registers and -fweb if funroll-loops
+ enabled. */
+ if (!OPTION_SET_P (flag_web))
+ flag_web = flag_unroll_loops;
+ if (!OPTION_SET_P (flag_rename_registers))
+ flag_rename_registers = flag_unroll_loops;
+ /* -fcunroll-grow-size default follws -f[no]-unroll-loops. */
+ if (!OPTION_SET_P (flag_cunroll_grow_size))
+ flag_cunroll_grow_size = flag_unroll_loops
+ || flag_peel_loops
+ || optimize >= 3;
+ }
+ else
+ {
+ if (!OPTION_SET_P (flag_cunroll_grow_size))
+ flag_cunroll_grow_size = flag_peel_loops || optimize >= 3;
+ }
+
}
/* Clear stack slot assignments remembered from previous functions.
@@ -2329,7 +2358,7 @@ ix86_option_override_internal (bool main_args_p,
set_ix86_tune_features (opts, ix86_tune, opts->x_ix86_dump_tunes);
- ix86_recompute_optlev_based_flags (opts, opts_set);
+ ix86_override_options_after_change ();
ix86_tune_cost = processor_cost_table[ix86_tune];
/* TODO: ix86_cost should be chosen at instruction or function granuality
@@ -2360,9 +2389,6 @@ ix86_option_override_internal (bool main_args_p,
|| TARGET_64BIT_P (opts->x_ix86_isa_flags))
opts->x_ix86_regparm = REGPARM_MAX;
- /* Default align_* from the processor table. */
- ix86_default_align (opts);
-
/* Provide default for -mbranch-cost= value. */
SET_OPTION_IF_UNSET (opts, opts_set, ix86_branch_cost,
ix86_tune_cost->branch_cost);
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index 39b2468799c..000415c0e2e 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -23563,20 +23563,12 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop)
/* Unroll small size loop when unroll factor is not explicitly
specified. */
- if (!(flag_unroll_loops
- || flag_unroll_all_loops
- || loop->unroll))
+ if (ix86_unroll_only_small_loops && !loop->unroll)
{
- nunroll = 1;
-
- /* Any explicit -f{no-}unroll-{all-}loops turns off
- -munroll-only-small-loops. */
- if (ix86_unroll_only_small_loops
- && !OPTION_SET_P (flag_unroll_loops)
- && loop->ninsns <= ix86_cost->small_unroll_ninsns)
- nunroll = ix86_cost->small_unroll_factor;
-
- return nunroll;
+ if (loop->ninsns <= ix86_cost->small_unroll_ninsns)
+ return MIN (nunroll, ix86_cost->small_unroll_factor);
+ else
+ return 1;
}
if (!TARGET_ADJUST_UNROLL)
diff --git a/gcc/loop-init.cc b/gcc/loop-init.cc
index 84336865ef7..ed1b2f6ebab 100644
--- a/gcc/loop-init.cc
+++ b/gcc/loop-init.cc
@@ -565,12 +565,10 @@ public:
{}
/* opt_pass methods: */
- virtual bool gate (function * fun)
+ virtual bool gate (function *)
{
- return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll
- || (targetm.loop_unroll_adjust
- && optimize >= 2
- && optimize_function_for_speed_p (fun)));
+ return (flag_unroll_loops || flag_unroll_all_loops
+ || cfun->has_unroll);
}
virtual unsigned int execute (function *);
@@ -586,8 +584,7 @@ pass_rtl_unroll_loops::execute (function *fun)
if (dump_file)
df_dump (dump_file);
- if (flag_unroll_loops
- || targetm.loop_unroll_adjust)
+ if (flag_unroll_loops)
flags |= UAP_UNROLL;
if (flag_unroll_all_loops)
flags |= UAP_UNROLL_ALL;
diff --git a/gcc/testsuite/gcc.dg/guality/loop-1.c b/gcc/testsuite/gcc.dg/guality/loop-1.c
index a32ea445a3f..1b1f6d32271 100644
--- a/gcc/testsuite/gcc.dg/guality/loop-1.c
+++ b/gcc/testsuite/gcc.dg/guality/loop-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
/* { dg-options "-fno-tree-scev-cprop -fno-tree-vectorize -g" } */
-/* { dg-additional-options "-mno-unroll-only-small-loops" { target ia32 } } */
-
#include "../nop.h"
diff --git a/gcc/testsuite/gcc.target/i386/pr86270.c b/gcc/testsuite/gcc.target/i386/pr86270.c
index cbc9fbb0450..98b012caf23 100644
--- a/gcc/testsuite/gcc.target/i386/pr86270.c
+++ b/gcc/testsuite/gcc.target/i386/pr86270.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mno-unroll-only-small-loops" } */
+/* { dg-options "-O2 -fno-unroll-loops" } */
int *a;
long len;
diff --git a/gcc/testsuite/gcc.target/i386/pr93002.c b/gcc/testsuite/gcc.target/i386/pr93002.c
index f75a847f75d..7e2d869e17b 100644
--- a/gcc/testsuite/gcc.target/i386/pr93002.c
+++ b/gcc/testsuite/gcc.target/i386/pr93002.c
@@ -1,6 +1,6 @@
/* PR target/93002 */
/* { dg-do compile } */
-/* { dg-options "-O2 -mno-unroll-only-small-loops" } */
+/* { dg-options "-O2 -fno-unroll-loops" } */
/* { dg-final { scan-assembler-not "cmp\[^\n\r]*-1" } } */
volatile int sink;
--
2.18.2


@@ -86,7 +86,7 @@
Summary: Various compilers (C, C++, Objective-C, ...)
Name: %{?scl_prefix}gcc%{gcc_ver}
Version: 12.2.1
-Release: 11
+Release: 13
# libgcc, libgfortran, libgomp, libstdc++ and crtstuff have
# GCC Runtime Exception.
License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD
@@ -138,6 +138,8 @@ Patch0: 0001-change-gcc-version.patch
Patch1: 0002-i386-Add-syscall-to-enable-AMX-for-latest-kernels.patch
Patch2: 0003-Remove-AVX512_VP2INTERSECT-from-PTA_SAPPHIRERAPIDS.patch
Patch3: 0004-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch
Patch4: 0005-Enable-small-loop-unrolling-for-O2.patch
Patch5: 0006-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch
# On ARM EABI systems, we do want -gnueabi to be part of the
@@ -599,6 +601,8 @@ not stable, so plugins must be rebuilt any time GCC is updated.
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
echo '%{vendor} %{version}-%{release}' > gcc/DEV-PHASE
@@ -2659,6 +2663,12 @@ end
%doc rpm.doc/changelogs/libcc1/ChangeLog*
%changelog
* Tue Nov 29 2022 Hongyu Wang <hongyu.wang@intel.com> 12.2.1-13
- i386: Only enable small loop unrolling in backend [PR 107692]

* Tue Nov 29 2022 Hongyu Wang <hongyu.wang@intel.com> 12.2.1-12
- Enable small loop unrolling for O2

* Fri Nov 18 2022 Chenxi Mao <chenxi.mao@suse.com> 12.2.1-11
- Support --program-suffix configuration