From: Daniel Santos
To: gcc-patches, Uros Bizjak, Jan Hubicka
Subject: [PATCH 08/12] [i386] Modify ix86_compute_frame_layout for -mcall-ms2sysv-xlogues
Date: Thu, 27 Apr 2017 08:05:00 -0000
Message-Id: <20170427080932.11703-8-daniel.santos@pobox.com>
In-Reply-To: <49e81c0b-07a4-22df-d7c3-2439177ac7cf@pobox.com>
References: <49e81c0b-07a4-22df-d7c3-2439177ac7cf@pobox.com>

ix86_compute_frame_layout now populates the fields added to structs
machine_function and ix86_frame, and modifies the frame layout as needed
to facilitate the use of the out-of-line save & restore stubs.  This is
also where we initialize stub_managed_regs to track which register saves
& restores are managed by the out-of-line stub and which are managed
inline; it is possible for a function to have some registers managed
inline and others out-of-line, e.g. when inline asm explicitly clobbers
a register.
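To illustrate the situation described above, here is a hypothetical example
(not part of the patch; the function names and the choice of clobber are made
up).  A 64-bit ms_abi function calls a sysv_abi callee, which may clobber
XMM6-15, RSI and RDI; those can be handled by the out-of-line stubs.  The asm
statement additionally clobbers r12, which the stubs only cover when rbx and
rbp are also clobbered (see the frame layout comment in the patch), so r12
would likely be saved and restored inline:

/* Hypothetical example, not part of the patch.  */
extern void __attribute__ ((sysv_abi)) sysv_callee (void);

void __attribute__ ((ms_abi))
ms_caller (void)
{
  /* Explicit clobber of a call-saved register; per the layout comment, the
     stubs only handle r12 when rbx and rbp are clobbered too, so this
     save/restore is expected to stay inline.  */
  __asm__ volatile ("" : : : "r12");
  sysv_callee ();	/* ms_abi -> sysv call enables m->call_ms2sysv */
}

Built with something like -O2 -mcall-ms2sysv-xlogues, this is the kind of
function the frame-layout changes below have to handle.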
Signed-off-by: Daniel Santos
---
 gcc/config/i386/i386.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 4 deletions(-)

diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 4f0cb7dd6cc..debfe457d97 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -2715,12 +2715,29 @@ struct GTY(()) stack_local_entry {
    saved frame pointer			if frame_pointer_needed
						<- HARD_FRAME_POINTER
    [saved regs]
-				<- regs_save_offset
+				<- reg_save_offset
    [padding0]
 				<- stack_realign_offset
    [saved SSE regs]
+	OR
+   [stub-saved registers for ms x64 --> sysv clobbers
+				<- Start of out-of-line, stub-saved/restored regs
+				   (see libgcc/config/i386/(sav|res)ms64*.S)
+     [XMM6-15]
+     [RSI]
+     [RDI]
+     [?RBX]		only if RBX is clobbered
+     [?RBP]		only if RBP and RBX are clobbered
+     [?R12]		only if R12 and all previous regs are clobbered
+     [?R13]		only if R13 and all previous regs are clobbered
+     [?R14]		only if R14 and all previous regs are clobbered
+     [?R15]		only if R15 and all previous regs are clobbered
+				<- end of stub-saved/restored regs
+     [padding1]
+   ]
+				<- outlined_save_offset
 				<- sse_regs_save_offset
-   [padding1]          |
+   [padding2]          |
		       |	<- FRAME_POINTER
    [va_arg registers]  |
		       |
@@ -2745,6 +2762,7 @@ struct ix86_frame
   HOST_WIDE_INT reg_save_offset;
   HOST_WIDE_INT stack_realign_allocate_offset;
   HOST_WIDE_INT stack_realign_offset;
+  HOST_WIDE_INT outlined_save_offset;
   HOST_WIDE_INT sse_reg_save_offset;

   /* When save_regs_using_mov is set, emit prologue using
@@ -12802,6 +12820,15 @@ ix86_builtin_setjmp_frame_value (void)
   return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
 }

+/* Disables out-of-lined msabi to sysv pro/epilogues and emits a warning if
+   warn_once is null, or *warn_once is zero.  */
+static void disable_call_ms2sysv_xlogues (const char *feature)
+{
+  cfun->machine->call_ms2sysv = false;
+  warning (OPT_mcall_ms2sysv_xlogues, "not currently compatible with %s.",
+	   feature);
+}
+
 /* When using -fsplit-stack, the allocation routines set a field in
    the TCB to the bottom of the stack plus this much space, measured
    in bytes.  */
@@ -12820,9 +12847,50 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
   HOST_WIDE_INT size = get_frame_size ();
   HOST_WIDE_INT to_allocate;

+  CLEAR_HARD_REG_SET (stub_managed_regs);
+
+  /* m->call_ms2sysv is initially enabled in ix86_expand_call for all 64-bit
+   * ms_abi functions that call a sysv function.  We now need to prune away
+   * cases where it should be disabled.  */
+  if (TARGET_64BIT && m->call_ms2sysv)
+    {
+      gcc_assert (TARGET_64BIT_MS_ABI);
+      gcc_assert (TARGET_CALL_MS2SYSV_XLOGUES);
+      gcc_assert (!TARGET_SEH);
+
+      if (!TARGET_SSE)
+	m->call_ms2sysv = false;
+
+      /* Don't break hot-patched functions.  */
+      else if (ix86_function_ms_hook_prologue (current_function_decl))
+	m->call_ms2sysv = false;
+
+      /* TODO: Cases not yet examined.  */
+      else if (crtl->calls_eh_return)
+	disable_call_ms2sysv_xlogues ("__builtin_eh_return");
+
+      else if (ix86_static_chain_on_stack)
+	disable_call_ms2sysv_xlogues ("static call chains");
+
+      else if (ix86_using_red_zone ())
+	disable_call_ms2sysv_xlogues ("red zones");
+
+      else if (flag_split_stack)
+	disable_call_ms2sysv_xlogues ("split stack");
+
+      /* Finally, compute which registers the stub will manage.  */
+      else
+	{
+	  unsigned count = xlogue_layout
+			   ::compute_stub_managed_regs (stub_managed_regs);
+	  m->call_ms2sysv_extra_regs = count - xlogue_layout::MIN_REGS;
+	}
+    }
+
   frame->nregs = ix86_nsaved_regs ();
   frame->nsseregs = ix86_nsaved_sseregs ();
-  CLEAR_HARD_REG_SET (stub_managed_regs);
+  m->call_ms2sysv_pad_in = 0;
+  m->call_ms2sysv_pad_out = 0;

   /* 64-bit MS ABI seem to require stack alignment to be always 16,
      except for function prologues, leaf functions and when the defult
@@ -12926,8 +12994,26 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
   offset = ROUND_UP (offset, stack_alignment_needed);
   frame->stack_realign_offset = offset;

+  if (TARGET_64BIT && m->call_ms2sysv)
+    {
+      gcc_assert (stack_alignment_needed >= 16);
+      gcc_assert (!frame->nsseregs);
+
+      m->call_ms2sysv_pad_in = !!(offset & UNITS_PER_WORD);
+
+      /* Select an appropriate layout for incoming stack offset.  */
+      const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+
+      if ((offset + xlogue.get_stack_space_used ()) & UNITS_PER_WORD)
+	m->call_ms2sysv_pad_out = 1;
+
+      offset += xlogue.get_stack_space_used ();
+      gcc_assert (!(offset & 0xf));
+      frame->outlined_save_offset = offset;
+    }
+
   /* Align and set SSE register save area.  */
-  if (frame->nsseregs)
+  else if (frame->nsseregs)
     {
       /* The only ABI that has saved SSE registers (Win64) also has a
	 16-byte aligned default stack.  However, many programs violate
--
2.11.0
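A side note on the pad_in/pad_out computation in the last hunk (an editor's
reading, illustrated with made-up numbers, not GCC code): UNITS_PER_WORD is 8
on x86-64, so for an offset that is already 8-byte aligned, the expression
(offset & UNITS_PER_WORD) is nonzero exactly when the offset is not 16-byte
aligned.  The two flags thus appear to record whether a word of padding is
needed before and after the stub save area so that both ends stay 16-byte
aligned, roughly:

/* Stand-alone sketch of the alignment test above; the stub_space value is a
   hypothetical size, not what xlogue_layout::get_stack_space_used () would
   return.  */
#include <stdio.h>

#define UNITS_PER_WORD 8

int
main (void)
{
  long offset = 40;        /* 8-byte aligned but not 16-byte aligned */
  long stub_space = 160;   /* hypothetical stub save-area size */

  int pad_in  = !!(offset & UNITS_PER_WORD);                /* -> 1 */
  int pad_out = !!((offset + stub_space) & UNITS_PER_WORD); /* -> 1 */

  printf ("pad_in=%d pad_out=%d\n", pad_in, pad_out);
  return 0;
}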