gcc: merge upstream fix for PR 83496 (fixes FS#814)
[openwrt/openwrt.git] / toolchain / gcc / patches / 7.3.0 / 100-PR-rtl-optimization-83496.patch
From: ebotcazou <ebotcazou@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Mon, 26 Feb 2018 16:29:30 +0000
Subject: [PATCH] PR rtl-optimization/83496
 * reorg.c (steal_delay_list_from_target): Change REDUNDANT array from
 booleans to RTXes. Call fix_reg_dead_note on every non-null element.
 (steal_delay_list_from_fallthrough): Call fix_reg_dead_note on a
 redundant insn, if any.
 (relax_delay_slots): Likewise.
 (update_reg_unused_notes): Rename REDUNDANT_INSN to OTHER_INSN.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@257996 138bc75d-0d04-0410-961f-82ee72b054a4
---
 create mode 120000 gcc/testsuite/gcc.c-torture/execute/20180226-1.c

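Note on the shape of the fix (illustrative sketch, not part of the patch itself): redundant_insn () returns the earlier insn that already does the work of a delay-slot insn, or NULL if there is none, but the unpatched callers only tested that result as a yes/no answer. The change keeps the returned rtx_insn * and hands it to fix_reg_dead_note () before the duplicate insn is dropped, so REG_DEAD notes can be corrected for the insn that remains. In the sketch below, only the identifiers named in the ChangeLog are real reorg.c entities; the stub declarations are simplified stand-ins (for instance, the real delay-list argument is a vec<rtx_insn *>, not void *).

/* Illustrative sketch only -- not part of the patch.  The declarations
   are simplified stand-ins for the reorg.c internals named above.  */
typedef struct rtx_insn rtx_insn;

extern rtx_insn *redundant_insn (rtx_insn *trial, rtx_insn *insn, void *delay_list);
extern void fix_reg_dead_note (rtx_insn *start_insn, rtx_insn *stop_insn);
extern void update_block (rtx_insn *trial, rtx_insn *where);
extern void delete_from_delay_slot (rtx_insn *trial);

static void
drop_redundant_delay_insn (rtx_insn *trial, rtx_insn *insn, void *delay_list)
{
  rtx_insn *prior_insn;

  /* Old shape: "if (redundant_insn (...))" -- the insn that made TRIAL
     redundant was discarded, so its REG_DEAD notes were never fixed up.  */
  if ((prior_insn = redundant_insn (trial, insn, delay_list)))
    {
      /* New shape: repair the notes on the insn that remains ...  */
      fix_reg_dead_note (prior_insn, insn);
      /* ... then delete the redundant copy exactly as before.  */
      update_block (trial, insn);
      delete_from_delay_slot (trial);
    }
}
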
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -1035,7 +1035,8 @@ check_annul_list_true_false (int annul_t
 
 static void
 steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq,
-                              vec<rtx_insn *> *delay_list, resources *sets,
+                              vec<rtx_insn *> *delay_list,
+                              struct resources *sets,
                               struct resources *needed,
                               struct resources *other_needed,
                               int slots_to_fill, int *pslots_filled,
@@ -1048,7 +1049,7 @@ steal_delay_list_from_target (rtx_insn *
   int used_annul = 0;
   int i;
   struct resources cc_set;
-  bool *redundant;
+  rtx_insn **redundant;
 
   /* We can't do anything if there are more delay slots in SEQ than we
      can handle, or if we don't know that it will be a taken branch.
@@ -1087,7 +1088,7 @@ steal_delay_list_from_target (rtx_insn *
   if (! targetm.can_follow_jump (insn, seq->insn (0)))
     return;
 
-  redundant = XALLOCAVEC (bool, XVECLEN (seq, 0));
+  redundant = XALLOCAVEC (rtx_insn *, XVECLEN (seq, 0));
   for (i = 1; i < seq->len (); i++)
     {
       rtx_insn *trial = seq->insn (i);
@@ -1151,7 +1152,10 @@ steal_delay_list_from_target (rtx_insn *
      we therefore decided not to copy. */
   for (i = 1; i < seq->len (); i++)
     if (redundant[i])
-      update_block (seq->insn (i), insn);
+      {
+        fix_reg_dead_note (redundant[i], insn);
+        update_block (seq->insn (i), insn);
+      }
 
   /* Show the place to which we will be branching. */
   *pnew_thread = first_active_target_insn (JUMP_LABEL (seq->insn (0)));
@@ -1198,6 +1202,7 @@ steal_delay_list_from_fallthrough (rtx_i
   for (i = 1; i < seq->len (); i++)
     {
       rtx_insn *trial = seq->insn (i);
+      rtx_insn *prior_insn;
 
       /* If TRIAL sets CC0, stealing it will move it too far from the use
          of CC0. */
@@ -1209,8 +1214,9 @@ steal_delay_list_from_fallthrough (rtx_i
         break;
 
       /* If this insn was already done, we don't need it. */
-      if (redundant_insn (trial, insn, *delay_list))
+      if ((prior_insn = redundant_insn (trial, insn, *delay_list)))
         {
+          fix_reg_dead_note (prior_insn, insn);
           update_block (trial, insn);
           delete_from_delay_slot (trial);
           continue;
@@ -1790,15 +1796,14 @@ fix_reg_dead_note (rtx_insn *start_insn,
       }
 }
 
-/* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
+/* Delete any REG_UNUSED notes that exist on INSN but not on OTHER_INSN.
 
    This handles the case of udivmodXi4 instructions which optimize their
-   output depending on whether any REG_UNUSED notes are present.
-   we must make sure that INSN calculates as many results as REDUNDANT_INSN
-   does. */
+   output depending on whether any REG_UNUSED notes are present. We must
+   make sure that INSN calculates as many results as OTHER_INSN does. */
 
 static void
-update_reg_unused_notes (rtx_insn *insn, rtx redundant_insn)
+update_reg_unused_notes (rtx_insn *insn, rtx other_insn)
 {
   rtx link, next;
 
@@ -1810,8 +1815,7 @@ update_reg_unused_notes (rtx_insn *insn,
           || !REG_P (XEXP (link, 0)))
         continue;
 
-      if (! find_regno_note (redundant_insn, REG_UNUSED,
-                             REGNO (XEXP (link, 0))))
+      if (!find_regno_note (other_insn, REG_UNUSED, REGNO (XEXP (link, 0))))
         remove_note (insn, link);
     }
 }
@@ -2324,9 +2328,8 @@ follow_jumps (rtx label, rtx_insn *jump,
    taken and THREAD_IF_TRUE is set. This is used for the branch at the
    end of a loop back up to the top.
 
-   OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the
-   thread. I.e., it is the fallthrough code of our jump or the target of the
-   jump when we are the only jump going there.
+   OWN_THREAD is true if we are the only user of the thread, i.e. it is
+   the target of the jump when we are the only jump going there.
 
    If OWN_THREAD is false, it must be the "true" thread of a jump. In that
   case, we can only take insns from the head of the thread for our delay
@@ -3117,7 +3120,7 @@ relax_delay_slots (rtx_insn *first)
   /* Look at every JUMP_INSN and see if we can improve it. */
   for (insn = first; insn; insn = next)
     {
-      rtx_insn *other;
+      rtx_insn *other, *prior_insn;
       bool crossing;
 
       next = next_active_insn (insn);
@@ -3223,8 +3226,9 @@ relax_delay_slots (rtx_insn *first)
       /* See if the first insn in the delay slot is redundant with some
          previous insn. Remove it from the delay slot if so; then set up
          to reprocess this insn. */
-      if (redundant_insn (pat->insn (1), delay_insn, vNULL))
+      if ((prior_insn = redundant_insn (pat->insn (1), delay_insn, vNULL)))
         {
+          fix_reg_dead_note (prior_insn, insn);
           update_block (pat->insn (1), insn);
           delete_from_delay_slot (pat->insn (1));
           next = prev_active_insn (next);