From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail-wm1-x329.google.com (mail-wm1-x329.google.com [IPv6:2a00:1450:4864:20::329]) by sourceware.org (Postfix) with ESMTPS id EC287392AC0F; Wed, 24 Aug 2022 12:01:16 +0000 (GMT) DMARC-Filter: OpenDMARC Filter v1.4.1 sourceware.org EC287392AC0F Authentication-Results: sourceware.org; dmarc=pass (p=quarantine dis=none) header.from=googlemail.com Authentication-Results: sourceware.org; spf=pass smtp.mailfrom=googlemail.com Received: by mail-wm1-x329.google.com with SMTP id m3-20020a05600c3b0300b003a5e0557150so1883646wms.0; Wed, 24 Aug 2022 05:01:16 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=googlemail.com; s=20210112; h=content-transfer-encoding:mime-version:reply-to:references :in-reply-to:message-id:date:subject:cc:to:from:from:to:cc; bh=DguTS2Q0gv/zg0dqGdK8wDHjNDy1YZwH8XLFuFygJ14=; b=GcEtdlbiJZhNnIbW+YS9CLXRknMkTWR8TV7/MlGmcB9lC+31dkuJ2J1vbmi02/S7dv +mUA03QXY95vCz80jzgq5VabakvR117JC90+XW/twY710hzlJfprDZ/DX5aSyLh/EuLc x5bxBDX6wS3t5BuXM8yNvTVumuxycgOlFXmanERcFI5d/XzIMPdCg8/DEhRCE+seqvt7 5uIpSzhzsz6LXKxXeKt316DF5SidL3OOsUfhDqz9ln06pzIHcTGgfv8F3EOzW4Q7VykS MlltqexrUZF93zG91Va5mkqIdGhMJptikFpLVclniWVLA1FruiaaGrbltPGGzp1t0N1j k2MQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:reply-to:references :in-reply-to:message-id:date:subject:cc:to:from:x-gm-message-state :from:to:cc; bh=DguTS2Q0gv/zg0dqGdK8wDHjNDy1YZwH8XLFuFygJ14=; b=l4qRlSknyInU9q03IZ4/VgCOu03iWQcGyv6FPKwXxB9z8FOUJaV+7Nd7xQ6+MKfz+P 1G86xmGXr67YkvwWmqZqey3NCuRZ+uJOOFVb0/TfltPJq5xo9aEeZ26VPBqOWvs+7atd BYThbCQJT9qJPhREI4l6vY/CPzQPuIRq9YkDuXUb22zta/hbtvJTCJt6m7zBEfuL4lUj E+Tm301uYNAxv+AYEZUy3TcQLu7tWjhnZSMKWiD/DjcblvVQt+L0XFAUj0qiXsESlGek SpB+gcs1WLP5wdlkgRDj+K0mxsJFVHnS5GwcSuFhQJyd/8V8lFikZ40m6nmS3HLZGYMl 7cUA== X-Gm-Message-State: ACgBeo3sTlzIQxfOUiNvRBRG33JQBs2hrx2eqzsvj3IbeGIfrwRkr617 IMoQegvcFcIpXD5ZReZL4FScunTB09c= X-Google-Smtp-Source: 
AA6agR6Z5jMI1eFaORmfIoiwuKlfGpHc5NVvX9HSBfHBAwuklW3MelCtm9ZoHvNgLxhn0fiOckaiLg== X-Received: by 2002:a05:600c:214d:b0:3a5:ce18:bb71 with SMTP id v13-20020a05600c214d00b003a5ce18bb71mr5253385wml.1.1661342472789; Wed, 24 Aug 2022 05:01:12 -0700 (PDT) Received: from localhost.localdomain ([86.14.124.218]) by smtp.gmail.com with ESMTPSA id cc19-20020a5d5c13000000b0022571d43d32sm1697676wrb.21.2022.08.24.05.01.10 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Wed, 24 Aug 2022 05:01:11 -0700 (PDT) From: herron.philip@googlemail.com X-Google-Original-From: philip.herron@embecosm.com To: gcc-patches@gcc.gnu.org Cc: gcc-rust@gcc.gnu.org, Philip Herron , David Faust Subject: [PATCH Rust front-end v2 29/37] gccrs: HIR to GCC GENERIC lowering Date: Wed, 24 Aug 2022 12:59:48 +0100 Message-Id: <20220824115956.737931-30-philip.herron@embecosm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824115956.737931-1-philip.herron@embecosm.com> References: <20220824115956.737931-1-philip.herron@embecosm.com> Reply-To: philip.herron@embecosm.com MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-Spam-Status: No, score=-12.0 required=5.0 tests=BAYES_00,DKIM_SIGNED,DKIM_VALID,DKIM_VALID_AU,DKIM_VALID_EF,FREEMAIL_FROM,GIT_PATCH_0,KAM_SHORT,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE,SPF_PASS,TXREP,T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on server2.sourceware.org List-Id: From: Philip Herron This pass walks the HIR crate and turns them into GCC tree's we do not have any Rust specific tree's. We are slowly removing the backend abstraction which was ported over from gccgo in favour of using direct tree's. Note we are porting over the constexpr evaluation from the C++ front-end here. 
Co-authored-by: David Faust --- gcc/rust/backend/rust-builtins.h | 189 ++ gcc/rust/backend/rust-compile-base.cc | 730 +++++ gcc/rust/backend/rust-compile-base.h | 146 + gcc/rust/backend/rust-compile-block.cc | 158 + gcc/rust/backend/rust-compile-block.h | 211 ++ gcc/rust/backend/rust-compile-context.cc | 146 + gcc/rust/backend/rust-compile-context.h | 343 ++ gcc/rust/backend/rust-compile-expr.cc | 2764 +++++++++++++++++ gcc/rust/backend/rust-compile-expr.h | 148 + gcc/rust/backend/rust-compile-extern.h | 172 + gcc/rust/backend/rust-compile-fnparam.cc | 121 + gcc/rust/backend/rust-compile-fnparam.h | 70 + gcc/rust/backend/rust-compile-implitem.cc | 101 + gcc/rust/backend/rust-compile-implitem.h | 91 + gcc/rust/backend/rust-compile-intrinsic.cc | 515 +++ gcc/rust/backend/rust-compile-intrinsic.h | 40 + gcc/rust/backend/rust-compile-item.cc | 206 ++ gcc/rust/backend/rust-compile-item.h | 88 + gcc/rust/backend/rust-compile-pattern.cc | 333 ++ gcc/rust/backend/rust-compile-pattern.h | 95 + gcc/rust/backend/rust-compile-resolve-path.cc | 301 ++ gcc/rust/backend/rust-compile-resolve-path.h | 73 + gcc/rust/backend/rust-compile-stmt.cc | 115 + gcc/rust/backend/rust-compile-stmt.h | 69 + .../backend/rust-compile-struct-field-expr.cc | 81 + .../backend/rust-compile-struct-field-expr.h | 46 + gcc/rust/backend/rust-compile-type.cc | 713 +++++ gcc/rust/backend/rust-compile-type.h | 79 + gcc/rust/backend/rust-compile-var-decl.h | 95 + gcc/rust/backend/rust-compile.cc | 414 +++ gcc/rust/backend/rust-compile.h | 47 + gcc/rust/backend/rust-constexpr.cc | 441 +++ gcc/rust/backend/rust-constexpr.h | 31 + gcc/rust/backend/rust-mangle.cc | 307 ++ gcc/rust/backend/rust-mangle.h | 52 + gcc/rust/backend/rust-tree.cc | 958 ++++++ gcc/rust/backend/rust-tree.h | 508 +++ gcc/rust/rust-backend.h | 506 +++ gcc/rust/rust-gcc.cc | 2717 ++++++++++++++++ 39 files changed, 14220 insertions(+) create mode 100644 gcc/rust/backend/rust-builtins.h create mode 100644 gcc/rust/backend/rust-compile-base.cc 
create mode 100644 gcc/rust/backend/rust-compile-base.h create mode 100644 gcc/rust/backend/rust-compile-block.cc create mode 100644 gcc/rust/backend/rust-compile-block.h create mode 100644 gcc/rust/backend/rust-compile-context.cc create mode 100644 gcc/rust/backend/rust-compile-context.h create mode 100644 gcc/rust/backend/rust-compile-expr.cc create mode 100644 gcc/rust/backend/rust-compile-expr.h create mode 100644 gcc/rust/backend/rust-compile-extern.h create mode 100644 gcc/rust/backend/rust-compile-fnparam.cc create mode 100644 gcc/rust/backend/rust-compile-fnparam.h create mode 100644 gcc/rust/backend/rust-compile-implitem.cc create mode 100644 gcc/rust/backend/rust-compile-implitem.h create mode 100644 gcc/rust/backend/rust-compile-intrinsic.cc create mode 100644 gcc/rust/backend/rust-compile-intrinsic.h create mode 100644 gcc/rust/backend/rust-compile-item.cc create mode 100644 gcc/rust/backend/rust-compile-item.h create mode 100644 gcc/rust/backend/rust-compile-pattern.cc create mode 100644 gcc/rust/backend/rust-compile-pattern.h create mode 100644 gcc/rust/backend/rust-compile-resolve-path.cc create mode 100644 gcc/rust/backend/rust-compile-resolve-path.h create mode 100644 gcc/rust/backend/rust-compile-stmt.cc create mode 100644 gcc/rust/backend/rust-compile-stmt.h create mode 100644 gcc/rust/backend/rust-compile-struct-field-expr.cc create mode 100644 gcc/rust/backend/rust-compile-struct-field-expr.h create mode 100644 gcc/rust/backend/rust-compile-type.cc create mode 100644 gcc/rust/backend/rust-compile-type.h create mode 100644 gcc/rust/backend/rust-compile-var-decl.h create mode 100644 gcc/rust/backend/rust-compile.cc create mode 100644 gcc/rust/backend/rust-compile.h create mode 100644 gcc/rust/backend/rust-constexpr.cc create mode 100644 gcc/rust/backend/rust-constexpr.h create mode 100644 gcc/rust/backend/rust-mangle.cc create mode 100644 gcc/rust/backend/rust-mangle.h create mode 100644 gcc/rust/backend/rust-tree.cc create mode 100644 
gcc/rust/backend/rust-tree.h create mode 100644 gcc/rust/rust-backend.h create mode 100644 gcc/rust/rust-gcc.cc diff --git a/gcc/rust/backend/rust-builtins.h b/gcc/rust/backend/rust-builtins.h new file mode 100644 index 00000000000..2bfa6c6cdf7 --- /dev/null +++ b/gcc/rust/backend/rust-builtins.h @@ -0,0 +1,189 @@ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_BUILTINS_H +#define RUST_BUILTINS_H + +#include "rust-system.h" +#include "tree.h" +#include "langhooks.h" + +namespace Rust { +namespace Compile { + +// https://github.com/rust-lang/rust/blob/master/library/core/src/intrinsics.rs +// https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs +// https://github.com/Rust-GCC/gccrs/issues/658 +// +// let llvm_name = match name { +// sym::sqrtf32 => "llvm.sqrt.f32", +// sym::sqrtf64 => "llvm.sqrt.f64", +// sym::powif32 => "llvm.powi.f32", +// sym::powif64 => "llvm.powi.f64", +// sym::sinf32 => "llvm.sin.f32", +// sym::sinf64 => "llvm.sin.f64", +// sym::cosf32 => "llvm.cos.f32", +// sym::cosf64 => "llvm.cos.f64", +// sym::powf32 => "llvm.pow.f32", +// sym::powf64 => "llvm.pow.f64", +// sym::expf32 => "llvm.exp.f32", +// sym::expf64 => "llvm.exp.f64", +// sym::exp2f32 => "llvm.exp2.f32", +// sym::exp2f64 => "llvm.exp2.f64", +// sym::logf32 => "llvm.log.f32", +// sym::logf64 => "llvm.log.f64", +// sym::log10f32 => "llvm.log10.f32", +// 
sym::log10f64 => "llvm.log10.f64", +// sym::log2f32 => "llvm.log2.f32", +// sym::log2f64 => "llvm.log2.f64", +// sym::fmaf32 => "llvm.fma.f32", +// sym::fmaf64 => "llvm.fma.f64", +// sym::fabsf32 => "llvm.fabs.f32", +// sym::fabsf64 => "llvm.fabs.f64", +// sym::minnumf32 => "llvm.minnum.f32", +// sym::minnumf64 => "llvm.minnum.f64", +// sym::maxnumf32 => "llvm.maxnum.f32", +// sym::maxnumf64 => "llvm.maxnum.f64", +// sym::copysignf32 => "llvm.copysign.f32", +// sym::copysignf64 => "llvm.copysign.f64", +// sym::floorf32 => "llvm.floor.f32", +// sym::floorf64 => "llvm.floor.f64", +// sym::ceilf32 => "llvm.ceil.f32", +// sym::ceilf64 => "llvm.ceil.f64", +// sym::truncf32 => "llvm.trunc.f32", +// sym::truncf64 => "llvm.trunc.f64", +// sym::rintf32 => "llvm.rint.f32", +// sym::rintf64 => "llvm.rint.f64", +// sym::nearbyintf32 => "llvm.nearbyint.f32", +// sym::nearbyintf64 => "llvm.nearbyint.f64", +// sym::roundf32 => "llvm.round.f32", +// sym::roundf64 => "llvm.round.f64", +// _ => return None, +// }; +// Some(cx.get_intrinsic(&llvm_name)) +class BuiltinsContext +{ +public: + static BuiltinsContext &get () + { + static BuiltinsContext instance; + return instance; + } + + bool lookup_simple_builtin (const std::string &name, tree *builtin) + { + auto it = rust_intrinsic_to_gcc_builtin.find (name); + if (it == rust_intrinsic_to_gcc_builtin.end ()) + return false; + + return lookup_gcc_builtin (it->second, builtin); + } + +private: + static const int builtin_const = 1 << 0; + static const int builtin_noreturn = 1 << 1; + static const int builtin_novops = 1 << 2; + + BuiltinsContext () { setup (); } + + void setup () + { + tree math_function_type_f32 + = build_function_type_list (float_type_node, float_type_node, NULL_TREE); + + define_builtin ("sinf32", BUILT_IN_SINF, "__builtin_sinf", "sinf", + math_function_type_f32, builtin_const); + + define_builtin ("sqrtf32", BUILT_IN_SQRTF, "__builtin_sqrtf", "sqrtf", + math_function_type_f32, builtin_const); + + define_builtin 
("unreachable", BUILT_IN_UNREACHABLE, + "__builtin_unreachable", NULL, + build_function_type (void_type_node, void_list_node), + builtin_const | builtin_noreturn); + + define_builtin ("abort", BUILT_IN_ABORT, "__builtin_abort", "abort", + build_function_type (void_type_node, void_list_node), + builtin_const | builtin_noreturn); + + define_builtin ("breakpoint", BUILT_IN_TRAP, "__builtin_trap", "breakpoint", + build_function_type (void_type_node, void_list_node), + builtin_const | builtin_noreturn); + + define_builtin ( + "memcpy", BUILT_IN_MEMCPY, "__builtin_memcpy", "memcpy", + build_function_type_list (build_pointer_type (void_type_node), + build_pointer_type (void_type_node), + build_pointer_type (void_type_node), + size_type_node, NULL_TREE), + 0); + } + + // Define a builtin function. BCODE is the builtin function code + // defined by builtins.def. NAME is the name of the builtin function. + // LIBNAME is the name of the corresponding library function, and is + // NULL if there isn't one. FNTYPE is the type of the function. + // CONST_P is true if the function has the const attribute. + // NORETURN_P is true if the function has the noreturn attribute. 
+ void define_builtin (const std::string rust_name, built_in_function bcode, + const char *name, const char *libname, tree fntype, + int flags) + { + tree decl = add_builtin_function (name, fntype, bcode, BUILT_IN_NORMAL, + libname, NULL_TREE); + if ((flags & builtin_const) != 0) + TREE_READONLY (decl) = 1; + if ((flags & builtin_noreturn) != 0) + TREE_THIS_VOLATILE (decl) = 1; + if ((flags & builtin_novops) != 0) + DECL_IS_NOVOPS (decl) = 1; + set_builtin_decl (bcode, decl, true); + this->builtin_functions_[name] = decl; + if (libname != NULL) + { + decl = add_builtin_function (libname, fntype, bcode, BUILT_IN_NORMAL, + NULL, NULL_TREE); + if ((flags & builtin_const) != 0) + TREE_READONLY (decl) = 1; + if ((flags & builtin_noreturn) != 0) + TREE_THIS_VOLATILE (decl) = 1; + if ((flags & builtin_novops) != 0) + DECL_IS_NOVOPS (decl) = 1; + this->builtin_functions_[libname] = decl; + } + + rust_intrinsic_to_gcc_builtin[rust_name] = name; + } + + bool lookup_gcc_builtin (const std::string &name, tree *builtin) + { + auto it = builtin_functions_.find (name); + if (it == builtin_functions_.end ()) + return false; + + *builtin = it->second; + return true; + } + + // A mapping of the GCC built-ins exposed to GCC Rust. + std::map builtin_functions_; + std::map rust_intrinsic_to_gcc_builtin; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_BUILTINS_H diff --git a/gcc/rust/backend/rust-compile-base.cc b/gcc/rust/backend/rust-compile-base.cc new file mode 100644 index 00000000000..2b5c850872f --- /dev/null +++ b/gcc/rust/backend/rust-compile-base.cc @@ -0,0 +1,730 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. 
+ +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-base.h" +#include "rust-abi.h" +#include "rust-compile-item.h" +#include "rust-compile-stmt.h" +#include "rust-compile-expr.h" +#include "rust-compile-fnparam.h" +#include "rust-compile-var-decl.h" +#include "rust-constexpr.h" +#include "rust-diagnostics.h" +#include "rust-expr.h" // for AST::AttrInputLiteral +#include "rust-macro.h" // for AST::MetaNameValueStr + +#include "fold-const.h" +#include "stringpool.h" +#include "attribs.h" +#include "tree.h" + +namespace Rust { +namespace Compile { + +bool inline should_mangle_item (const tree fndecl) +{ + return lookup_attribute ("no_mangle", DECL_ATTRIBUTES (fndecl)) == NULL_TREE; +} + +void +HIRCompileBase::setup_fndecl (tree fndecl, bool is_main_entry_point, + bool is_generic_fn, HIR::Visibility &visibility, + const HIR::FunctionQualifiers &qualifiers, + const AST::AttrVec &attrs) +{ + // if its the main fn or pub visibility mark its as DECL_PUBLIC + // please see https://github.com/Rust-GCC/gccrs/pull/137 + bool is_pub = visibility.get_vis_type () == HIR::Visibility::VisType::PUBLIC; + if (is_main_entry_point || (is_pub && !is_generic_fn)) + { + TREE_PUBLIC (fndecl) = 1; + } + + // is it a const fn + if (qualifiers.is_const ()) + { + TREE_READONLY (fndecl) = 1; + } + + // is it inline? 
+ for (const auto &attr : attrs) + { + bool is_inline = attr.get_path ().as_string ().compare ("inline") == 0; + bool is_must_use + = attr.get_path ().as_string ().compare ("must_use") == 0; + bool is_cold = attr.get_path ().as_string ().compare ("cold") == 0; + bool is_link_section + = attr.get_path ().as_string ().compare ("link_section") == 0; + bool no_mangle = attr.get_path ().as_string ().compare ("no_mangle") == 0; + bool is_deprecated + = attr.get_path ().as_string ().compare ("deprecated") == 0; + + if (is_inline) + { + handle_inline_attribute_on_fndecl (fndecl, attr); + } + else if (is_must_use) + { + handle_must_use_attribute_on_fndecl (fndecl, attr); + } + else if (is_cold) + { + handle_cold_attribute_on_fndecl (fndecl, attr); + } + else if (is_link_section) + { + handle_link_section_attribute_on_fndecl (fndecl, attr); + } + else if (is_deprecated) + { + handle_deprecated_attribute_on_fndecl (fndecl, attr); + } + else if (no_mangle) + { + handle_no_mangle_attribute_on_fndecl (fndecl, attr); + } + } +} + +void +HIRCompileBase::handle_cold_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr) +{ + // simple #[cold] + if (!attr.has_attr_input ()) + { + tree cold = get_identifier ("cold"); + // this will get handled by the GCC backend later + DECL_ATTRIBUTES (fndecl) + = tree_cons (cold, NULL_TREE, DECL_ATTRIBUTES (fndecl)); + return; + } + + rust_error_at (attr.get_locus (), + "attribute % does not accept any arguments"); +} + +void +HIRCompileBase::handle_link_section_attribute_on_fndecl ( + tree fndecl, const AST::Attribute &attr) +{ + if (!attr.has_attr_input ()) + { + rust_error_at (attr.get_locus (), + "% expects exactly one argment"); + return; + } + + rust_assert (attr.get_attr_input ().get_attr_input_type () + == AST::AttrInput::AttrInputType::LITERAL); + + auto &literal = static_cast (attr.get_attr_input ()); + const auto &msg_str = literal.get_literal ().as_string (); + + if (decl_section_name (fndecl)) + { + rust_warning_at 
(attr.get_locus (), 0, "section name redefined"); + } + + set_decl_section_name (fndecl, msg_str.c_str ()); +} + +void +HIRCompileBase::handle_no_mangle_attribute_on_fndecl ( + tree fndecl, const AST::Attribute &attr) +{ + if (attr.has_attr_input ()) + { + rust_error_at (attr.get_locus (), + "attribute % does not accept any arguments"); + return; + } + + DECL_ATTRIBUTES (fndecl) = tree_cons (get_identifier ("no_mangle"), NULL_TREE, + DECL_ATTRIBUTES (fndecl)); +} + +void +HIRCompileBase::handle_deprecated_attribute_on_fndecl ( + tree fndecl, const AST::Attribute &attr) +{ + tree value = NULL_TREE; + TREE_DEPRECATED (fndecl) = 1; + + // simple #[deprecated] + if (!attr.has_attr_input ()) + return; + + const AST::AttrInput &input = attr.get_attr_input (); + auto input_type = input.get_attr_input_type (); + + if (input_type == AST::AttrInput::AttrInputType::LITERAL) + { + // handle #[deprecated = "message"] + auto &literal + = static_cast (attr.get_attr_input ()); + const auto &msg_str = literal.get_literal ().as_string (); + value = build_string (msg_str.size (), msg_str.c_str ()); + } + else if (input_type == AST::AttrInput::AttrInputType::TOKEN_TREE) + { + // handle #[deprecated(since = "...", note = "...")] + const auto &option = static_cast (input); + AST::AttrInputMetaItemContainer *meta_item = option.parse_to_meta_item (); + for (const auto &item : meta_item->get_items ()) + { + auto converted_item = item->to_meta_name_value_str (); + if (!converted_item) + continue; + auto key_value = converted_item->get_name_value_pair (); + if (key_value.first.compare ("since") == 0) + { + // valid, but this is handled by Cargo and some third-party audit + // tools + continue; + } + else if (key_value.first.compare ("note") == 0) + { + const auto &msg_str = key_value.second; + if (value) + rust_error_at (attr.get_locus (), "multiple % items"); + value = build_string (msg_str.size (), msg_str.c_str ()); + } + else + { + rust_error_at (attr.get_locus (), "unknown meta item 
%qs", + key_value.first.c_str ()); + } + } + } + + if (value) + { + tree attr_list = build_tree_list (NULL_TREE, value); + DECL_ATTRIBUTES (fndecl) + = tree_cons (get_identifier ("deprecated"), attr_list, + DECL_ATTRIBUTES (fndecl)); + } +} + +void +HIRCompileBase::handle_inline_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr) +{ + // simple #[inline] + if (!attr.has_attr_input ()) + { + DECL_DECLARED_INLINE_P (fndecl) = 1; + return; + } + + const AST::AttrInput &input = attr.get_attr_input (); + bool is_token_tree + = input.get_attr_input_type () == AST::AttrInput::AttrInputType::TOKEN_TREE; + rust_assert (is_token_tree); + const auto &option = static_cast (input); + AST::AttrInputMetaItemContainer *meta_item = option.parse_to_meta_item (); + if (meta_item->get_items ().size () != 1) + { + rust_error_at (attr.get_locus (), "invalid number of arguments"); + return; + } + + const std::string inline_option + = meta_item->get_items ().at (0)->as_string (); + + // we only care about NEVER and ALWAYS else its an error + bool is_always = inline_option.compare ("always") == 0; + bool is_never = inline_option.compare ("never") == 0; + + // #[inline(never)] + if (is_never) + { + DECL_UNINLINABLE (fndecl) = 1; + } + // #[inline(always)] + else if (is_always) + { + DECL_DECLARED_INLINE_P (fndecl) = 1; + DECL_ATTRIBUTES (fndecl) = tree_cons (get_identifier ("always_inline"), + NULL, DECL_ATTRIBUTES (fndecl)); + } + else + { + rust_error_at (attr.get_locus (), "unknown inline option"); + } +} + +void +HIRCompileBase::handle_must_use_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr) +{ + tree nodiscard = get_identifier ("nodiscard"); + tree value = NULL_TREE; + + if (attr.has_attr_input ()) + { + rust_assert (attr.get_attr_input ().get_attr_input_type () + == AST::AttrInput::AttrInputType::LITERAL); + + auto &literal + = static_cast (attr.get_attr_input ()); + const auto &msg_str = literal.get_literal ().as_string (); + tree message = build_string 
(msg_str.size (), msg_str.c_str ()); + + value = tree_cons (nodiscard, message, NULL_TREE); + } + + DECL_ATTRIBUTES (fndecl) + = tree_cons (nodiscard, value, DECL_ATTRIBUTES (fndecl)); +} + +void +HIRCompileBase::setup_abi_options (tree fndecl, ABI abi) +{ + tree abi_tree = NULL_TREE; + + switch (abi) + { + case Rust::ABI::RUST: + case Rust::ABI::INTRINSIC: + case Rust::ABI::C: + case Rust::ABI::CDECL: + // `decl_attributes` function (not the macro) has the side-effect of + // actually switching the codegen backend to use the ABI we annotated. + // However, since `cdecl` is the default ABI GCC will be using, explicitly + // specifying that ABI will cause GCC to emit a warning saying the + // attribute is useless (which is confusing to the user as the attribute + // is added by us). + DECL_ATTRIBUTES (fndecl) + = tree_cons (get_identifier ("cdecl"), NULL, DECL_ATTRIBUTES (fndecl)); + + return; + + case Rust::ABI::STDCALL: + abi_tree = get_identifier ("stdcall"); + + break; + + case Rust::ABI::FASTCALL: + abi_tree = get_identifier ("fastcall"); + + break; + + case Rust::ABI::SYSV64: + abi_tree = get_identifier ("sysv_abi"); + + break; + + case Rust::ABI::WIN64: + abi_tree = get_identifier ("ms_abi"); + + break; + + default: + break; + } + + decl_attributes (&fndecl, build_tree_list (abi_tree, NULL_TREE), 0); +} + +// ported from gcc/c/c-typecheck.c +// +// Mark EXP saying that we need to be able to take the +// address of it; it should not be allocated in a register. +// Returns true if successful. ARRAY_REF_P is true if this +// is for ARRAY_REF construction - in that case we don't want +// to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE, +// it is fine to use ARRAY_REFs for vector subscripts on vector +// register variables. 
+bool +HIRCompileBase::mark_addressable (tree exp, Location locus) +{ + tree x = exp; + + while (1) + switch (TREE_CODE (x)) + { + case VIEW_CONVERT_EXPR: + if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE + && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0)))) + return true; + x = TREE_OPERAND (x, 0); + break; + + case COMPONENT_REF: + // TODO + // if (DECL_C_BIT_FIELD (TREE_OPERAND (x, 1))) + // { + // error ("cannot take address of bit-field %qD", TREE_OPERAND (x, + // 1)); return false; + // } + + /* FALLTHRU */ + case ADDR_EXPR: + case ARRAY_REF: + case REALPART_EXPR: + case IMAGPART_EXPR: + x = TREE_OPERAND (x, 0); + break; + + case COMPOUND_LITERAL_EXPR: + TREE_ADDRESSABLE (x) = 1; + TREE_ADDRESSABLE (COMPOUND_LITERAL_EXPR_DECL (x)) = 1; + return true; + + case CONSTRUCTOR: + TREE_ADDRESSABLE (x) = 1; + return true; + + case VAR_DECL: + case CONST_DECL: + case PARM_DECL: + case RESULT_DECL: + // (we don't have a concept of a "register" declaration) + // fallthrough */ + + /* FALLTHRU */ + case FUNCTION_DECL: + TREE_ADDRESSABLE (x) = 1; + + /* FALLTHRU */ + default: + return true; + } + + return false; +} + +tree +HIRCompileBase::address_expression (tree expr, Location location) +{ + if (expr == error_mark_node) + return error_mark_node; + + if (!mark_addressable (expr, location)) + return error_mark_node; + + return build_fold_addr_expr_loc (location.gcc_location (), expr); +} + +tree +HIRCompileBase::indirect_expression (tree expr, Location locus) +{ + if (expr == error_mark_node) + return error_mark_node; + + return build_fold_indirect_ref_loc (locus.gcc_location (), expr); +} + +std::vector +HIRCompileBase::compile_locals_for_block (Context *ctx, Resolver::Rib &rib, + tree fndecl) +{ + std::vector locals; + for (auto it : rib.get_declarations ()) + { + NodeId node_id = it.first; + HirId ref = UNKNOWN_HIRID; + if (!ctx->get_mappings ()->lookup_node_to_hir (node_id, &ref)) + continue; + + // we only care about local patterns + HIR::Pattern *pattern = 
ctx->get_mappings ()->lookup_hir_pattern (ref); + if (pattern == nullptr) + continue; + + // lookup the type + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (ref, &tyty)) + continue; + + // compile the local + tree type = TyTyResolveCompile::compile (ctx, tyty); + Bvariable *compiled + = CompileVarDecl::compile (fndecl, type, pattern, ctx); + locals.push_back (compiled); + } + return locals; +} + +void +HIRCompileBase::compile_function_body (Context *ctx, tree fndecl, + HIR::BlockExpr &function_body, + bool has_return_type) +{ + for (auto &s : function_body.get_statements ()) + { + auto compiled_expr = CompileStmt::Compile (s.get (), ctx); + if (compiled_expr != nullptr) + { + tree s = convert_to_void (compiled_expr, ICV_STATEMENT); + ctx->add_statement (s); + } + } + + if (function_body.has_expr ()) + { + // the previous passes will ensure this is a valid return + // or a valid trailing expression + tree compiled_expr + = CompileExpr::Compile (function_body.expr.get (), ctx); + + if (compiled_expr != nullptr) + { + if (has_return_type) + { + std::vector retstmts; + retstmts.push_back (compiled_expr); + + auto ret = ctx->get_backend ()->return_statement ( + fndecl, retstmts, + function_body.get_final_expr ()->get_locus ()); + ctx->add_statement (ret); + } + else + { + // FIXME can this actually happen? 
+ ctx->add_statement (compiled_expr); + } + } + } +} + +tree +HIRCompileBase::compile_function ( + Context *ctx, const std::string &fn_name, HIR::SelfParam &self_param, + std::vector &function_params, + const HIR::FunctionQualifiers &qualifiers, HIR::Visibility &visibility, + AST::AttrVec &outer_attrs, Location locus, HIR::BlockExpr *function_body, + const Resolver::CanonicalPath *canonical_path, TyTy::FnType *fntype, + bool function_has_return) +{ + tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype); + std::string ir_symbol_name + = canonical_path->get () + fntype->subst_as_string (); + + // we don't mangle the main fn since we haven't implemented the main shim + bool is_main_fn = fn_name.compare ("main") == 0; + std::string asm_name = fn_name; + + unsigned int flags = 0; + tree fndecl = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, + "" /* asm_name */, flags, locus); + + setup_fndecl (fndecl, is_main_fn, fntype->has_subsititions_defined (), + visibility, qualifiers, outer_attrs); + setup_abi_options (fndecl, qualifiers.get_abi ()); + + // conditionally mangle the function name + bool should_mangle = should_mangle_item (fndecl); + if (!is_main_fn && should_mangle) + asm_name = ctx->mangle_item (fntype, *canonical_path); + SET_DECL_ASSEMBLER_NAME (fndecl, + get_identifier_with_length (asm_name.data (), + asm_name.length ())); + + // insert into the context + ctx->insert_function_decl (fntype, fndecl); + + // setup the params + TyTy::BaseType *tyret = fntype->get_return_type (); + std::vector param_vars; + if (!self_param.is_error ()) + { + rust_assert (fntype->is_method ()); + TyTy::BaseType *self_tyty_lookup = fntype->get_self_type (); + + tree self_type = TyTyResolveCompile::compile (ctx, self_tyty_lookup); + Bvariable *compiled_self_param + = CompileSelfParam::compile (ctx, fndecl, self_param, self_type, + self_param.get_locus ()); + + param_vars.push_back (compiled_self_param); + ctx->insert_var_decl (self_param.get_mappings 
().get_hirid (), + compiled_self_param); + } + + // offset from + 1 for the TyTy::FnType being used when this is a method to + // skip over Self on the FnType + bool is_method = !self_param.is_error (); + size_t i = is_method ? 1 : 0; + for (auto &referenced_param : function_params) + { + auto tyty_param = fntype->param_at (i++); + auto param_tyty = tyty_param.second; + auto compiled_param_type = TyTyResolveCompile::compile (ctx, param_tyty); + + Location param_locus = referenced_param.get_locus (); + Bvariable *compiled_param_var + = CompileFnParam::compile (ctx, fndecl, &referenced_param, + compiled_param_type, param_locus); + + param_vars.push_back (compiled_param_var); + + const HIR::Pattern ¶m_pattern = *referenced_param.get_param_name (); + ctx->insert_var_decl (param_pattern.get_pattern_mappings ().get_hirid (), + compiled_param_var); + } + + if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + // lookup locals + auto body_mappings = function_body->get_mappings (); + Resolver::Rib *rib = nullptr; + bool ok + = ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib); + rust_assert (ok); + + std::vector locals + = compile_locals_for_block (ctx, *rib, fndecl); + + tree enclosing_scope = NULL_TREE; + Location start_location = function_body->get_locus (); + Location end_location = function_body->get_end_locus (); + + tree code_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals, + start_location, end_location); + ctx->push_block (code_block); + + Bvariable *return_address = nullptr; + if (function_has_return) + { + tree return_type = TyTyResolveCompile::compile (ctx, tyret); + + bool address_is_taken = false; + tree ret_var_stmt = NULL_TREE; + + return_address + = ctx->get_backend ()->temporary_variable (fndecl, code_block, + return_type, NULL, + address_is_taken, locus, + &ret_var_stmt); + + ctx->add_statement (ret_var_stmt); + } + + ctx->push_fn (fndecl, return_address); + 
compile_function_body (ctx, fndecl, *function_body, function_has_return); + tree bind_tree = ctx->pop_block (); + + gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR); + DECL_SAVED_TREE (fndecl) = bind_tree; + + ctx->pop_fn (); + ctx->push_function (fndecl); + + return fndecl; +} + +tree +HIRCompileBase::compile_constant_item ( + Context *ctx, TyTy::BaseType *resolved_type, + const Resolver::CanonicalPath *canonical_path, HIR::Expr *const_value_expr, + Location locus) +{ + const std::string &ident = canonical_path->get (); + tree type = TyTyResolveCompile::compile (ctx, resolved_type); + tree const_type = build_qualified_type (type, TYPE_QUAL_CONST); + + bool is_block_expr + = const_value_expr->get_expression_type () == HIR::Expr::ExprType::Block; + + // compile the expression + tree folded_expr = error_mark_node; + if (!is_block_expr) + { + tree value = CompileExpr::Compile (const_value_expr, ctx); + folded_expr = fold_expr (value); + } + else + { + // in order to compile a block expr we want to reuse as much existing + // machineary that we already have. 
This means the best approach is to + // make a _fake_ function with a block so it can hold onto temps then + // use our constexpr code to fold it completely or error_mark_node + Backend::typed_identifier receiver; + tree compiled_fn_type = ctx->get_backend ()->function_type ( + receiver, {}, {Backend::typed_identifier ("_", const_type, locus)}, + NULL, locus); + + tree fndecl + = ctx->get_backend ()->function (compiled_fn_type, ident, "", 0, locus); + TREE_READONLY (fndecl) = 1; + + tree enclosing_scope = NULL_TREE; + HIR::BlockExpr *function_body + = static_cast (const_value_expr); + Location start_location = function_body->get_locus (); + Location end_location = function_body->get_end_locus (); + + tree code_block + = ctx->get_backend ()->block (fndecl, enclosing_scope, {}, + start_location, end_location); + ctx->push_block (code_block); + + bool address_is_taken = false; + tree ret_var_stmt = NULL_TREE; + Bvariable *return_address + = ctx->get_backend ()->temporary_variable (fndecl, code_block, + const_type, NULL, + address_is_taken, locus, + &ret_var_stmt); + + ctx->add_statement (ret_var_stmt); + ctx->push_fn (fndecl, return_address); + + compile_function_body (ctx, fndecl, *function_body, true); + tree bind_tree = ctx->pop_block (); + + gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR); + DECL_SAVED_TREE (fndecl) = bind_tree; + + ctx->pop_fn (); + + // lets fold it into a call expr + tree call = build_call_array_loc (locus.gcc_location (), const_type, + fndecl, 0, NULL); + folded_expr = fold_expr (call); + } + + return named_constant_expression (const_type, ident, folded_expr, locus); +} + +tree +HIRCompileBase::named_constant_expression (tree type_tree, + const std::string &name, + tree const_val, Location location) +{ + if (type_tree == error_mark_node || const_val == error_mark_node) + return error_mark_node; + + tree name_tree = get_identifier_with_length (name.data (), name.length ()); + tree decl + = build_decl (location.gcc_location (), CONST_DECL, 
name_tree, type_tree); + DECL_INITIAL (decl) = const_val; + TREE_CONSTANT (decl) = 1; + TREE_READONLY (decl) = 1; + + rust_preserve_from_gc (decl); + return decl; +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-base.h b/gcc/rust/backend/rust-compile-base.h new file mode 100644 index 00000000000..4c20933cafc --- /dev/null +++ b/gcc/rust/backend/rust-compile-base.h @@ -0,0 +1,146 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_BASE +#define RUST_COMPILE_BASE + +#include "rust-compile-context.h" +#include "rust-compile-type.h" +#include "rust-hir-visitor.h" +#include "rust-hir-full.h" + +namespace Rust { +namespace Compile { + +class HIRCompileBase +{ +public: + virtual ~HIRCompileBase () {} + +protected: + HIRCompileBase (Context *ctx) : ctx (ctx) {} + + Context *ctx; + +protected: + Context *get_context () { return ctx; } + + tree coercion_site (HirId id, tree rvalue, const TyTy::BaseType *actual, + const TyTy::BaseType *expected, Location lvalue_locus, + Location rvalue_locus); + tree coercion_site1 (tree rvalue, const TyTy::BaseType *actual, + const TyTy::BaseType *expected, Location lvalue_locus, + Location rvalue_locus); + + tree coerce_to_dyn_object (tree compiled_ref, const TyTy::BaseType *actual, + const TyTy::DynamicObjectType *ty, Location locus); + + tree compute_address_for_trait_item ( + const Resolver::TraitItemReference *ref, + const TyTy::TypeBoundPredicate *predicate, + std::vector> + &receiver_bounds, + const TyTy::BaseType *receiver, const TyTy::BaseType *root, Location locus); + + bool verify_array_capacities (tree ltype, tree rtype, Location ltype_locus, + Location rtype_locus); + + tree query_compile (HirId ref, TyTy::BaseType *lookup, + const HIR::PathIdentSegment &final_segment, + const Analysis::NodeMapping &mappings, + Location expr_locus, bool is_qualified_path); + + tree resolve_adjustements (std::vector &adjustments, + tree expression, Location locus); + + tree resolve_deref_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus); + + tree resolve_indirection_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus); + + tree resolve_unsized_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus); + + tree resolve_unsized_slice_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus); + + tree resolve_unsized_dyn_adjustment 
(Resolver::Adjustment &adjustment, + tree expression, Location locus); + + static void setup_fndecl (tree fndecl, bool is_main_entry_point, + bool is_generic_fn, HIR::Visibility &visibility, + const HIR::FunctionQualifiers &qualifiers, + const AST::AttrVec &attrs); + + static void handle_inline_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + + static void handle_cold_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + + static void handle_must_use_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + + static void + handle_link_section_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + static void + handle_deprecated_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + + static void handle_no_mangle_attribute_on_fndecl (tree fndecl, + const AST::Attribute &attr); + + static void setup_abi_options (tree fndecl, ABI abi); + + static tree address_expression (tree expr, Location locus); + + static tree indirect_expression (tree expr, Location locus); + + static bool mark_addressable (tree, Location); + + static std::vector + compile_locals_for_block (Context *ctx, Resolver::Rib &rib, tree fndecl); + + static void compile_function_body (Context *ctx, tree fndecl, + HIR::BlockExpr &function_body, + bool has_return_type); + + static tree compile_function ( + Context *ctx, const std::string &fn_name, HIR::SelfParam &self_param, + std::vector &function_params, + const HIR::FunctionQualifiers &qualifiers, HIR::Visibility &visibility, + AST::AttrVec &outer_attrs, Location locus, HIR::BlockExpr *function_body, + const Resolver::CanonicalPath *canonical_path, TyTy::FnType *fntype, + bool function_has_return); + + static tree + compile_constant_item (Context *ctx, TyTy::BaseType *resolved_type, + const Resolver::CanonicalPath *canonical_path, + HIR::Expr *const_value_expr, Location locus); + + static tree named_constant_expression (tree type_tree, + const std::string &name, + tree const_val, Location 
location); +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_BASE diff --git a/gcc/rust/backend/rust-compile-block.cc b/gcc/rust/backend/rust-compile-block.cc new file mode 100644 index 00000000000..99674e2d1e7 --- /dev/null +++ b/gcc/rust/backend/rust-compile-block.cc @@ -0,0 +1,158 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile-block.h" +#include "rust-compile-stmt.h" +#include "rust-compile-expr.h" + +namespace Rust { +namespace Compile { + +CompileBlock::CompileBlock (Context *ctx, Bvariable *result) + : HIRCompileBase (ctx), translated (nullptr), result (result) +{} + +tree +CompileBlock::compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result) +{ + CompileBlock compiler (ctx, result); + compiler.visit (*expr); + return compiler.translated; +} + +void +CompileBlock::visit (HIR::BlockExpr &expr) +{ + fncontext fnctx = ctx->peek_fn (); + tree fndecl = fnctx.fndecl; + Location start_location = expr.get_locus (); + Location end_location = expr.get_end_locus (); + auto body_mappings = expr.get_mappings (); + + Resolver::Rib *rib = nullptr; + if (!ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib)) + { + rust_fatal_error (expr.get_locus (), "failed to setup locals per block"); + return; + } + + std::vector locals + = compile_locals_for_block (ctx, *rib, fndecl); + + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree new_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals, + start_location, end_location); + ctx->push_block (new_block); + + for (auto &s : expr.get_statements ()) + { + auto compiled_expr = CompileStmt::Compile (s.get (), ctx); + if (compiled_expr != nullptr) + { + tree s = convert_to_void (compiled_expr, ICV_STATEMENT); + ctx->add_statement (s); + } + } + + if (expr.has_expr ()) + { + // the previous passes will ensure this is a valid return or + // a valid trailing expression + tree compiled_expr = CompileExpr::Compile (expr.expr.get (), ctx); + if (compiled_expr != nullptr) + { + if (result == nullptr) + { + ctx->add_statement (compiled_expr); + } + else + { + tree result_reference = ctx->get_backend ()->var_expression ( + result, expr.get_final_expr ()->get_locus ()); + + tree assignment + = ctx->get_backend ()->assignment_statement (result_reference, + compiled_expr, + expr.get_locus ()); + 
ctx->add_statement (assignment); + } + } + } + + ctx->pop_block (); + translated = new_block; +} + +void +CompileConditionalBlocks::visit (HIR::IfExpr &expr) +{ + fncontext fnctx = ctx->peek_fn (); + tree fndecl = fnctx.fndecl; + tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx); + tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result); + + translated + = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block, + NULL, expr.get_locus ()); +} + +void +CompileConditionalBlocks::visit (HIR::IfExprConseqElse &expr) +{ + fncontext fnctx = ctx->peek_fn (); + tree fndecl = fnctx.fndecl; + tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx); + tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result); + tree else_block = CompileBlock::compile (expr.get_else_block (), ctx, result); + + translated + = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block, + else_block, expr.get_locus ()); +} + +void +CompileConditionalBlocks::visit (HIR::IfExprConseqIf &expr) +{ + fncontext fnctx = ctx->peek_fn (); + tree fndecl = fnctx.fndecl; + tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx); + tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result); + + // else block + std::vector locals; + Location start_location = expr.get_conseq_if_expr ()->get_locus (); + Location end_location = expr.get_conseq_if_expr ()->get_locus (); // FIXME + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree else_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals, + start_location, end_location); + ctx->push_block (else_block); + + tree else_stmt_decl + = CompileConditionalBlocks::compile (expr.get_conseq_if_expr (), ctx, + result); + ctx->add_statement (else_stmt_decl); + + ctx->pop_block (); + + translated + = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block, + else_block, expr.get_locus ()); +} + +} // 
namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-block.h b/gcc/rust/backend/rust-compile-block.h new file mode 100644 index 00000000000..cdd17f19ca2 --- /dev/null +++ b/gcc/rust/backend/rust-compile-block.h @@ -0,0 +1,211 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_BLOCK +#define RUST_COMPILE_BLOCK + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileBlock : private HIRCompileBase +{ +public: + static tree compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result); + +protected: + void visit (HIR::BlockExpr &expr); + +private: + CompileBlock (Context *ctx, Bvariable *result); + + tree translated; + Bvariable *result; +}; + +class CompileConditionalBlocks : public HIRCompileBase, + public HIR::HIRExpressionVisitor +{ +public: + static tree compile (HIR::IfExpr *expr, Context *ctx, Bvariable *result) + { + CompileConditionalBlocks resolver (ctx, result); + expr->accept_vis (resolver); + return resolver.translated; + } + + void visit (HIR::IfExpr &expr) override; + void visit (HIR::IfExprConseqElse &expr) override; + void visit (HIR::IfExprConseqIf &expr) override; + + // Empty visit for unused Expression HIR nodes. 
+ void visit (HIR::PathInExpression &) override {} + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::ClosureExprInner &) override {} + void visit (HIR::ClosureExprInnerTyped &) override {} + void visit (HIR::StructExprFieldIdentifier &) override {} + void visit (HIR::StructExprFieldIdentifierValue &) override {} + void visit (HIR::StructExprFieldIndexValue &) override {} + void visit (HIR::StructExprStruct &) override {} + void visit (HIR::StructExprStructFields &) override {} + void visit (HIR::LiteralExpr &) override {} + void visit (HIR::BorrowExpr &) override {} + void visit (HIR::DereferenceExpr &) override {} + void visit (HIR::ErrorPropagationExpr &) override {} + void visit (HIR::NegationExpr &) override {} + void visit (HIR::ArithmeticOrLogicalExpr &) override {} + void visit (HIR::ComparisonExpr &) override {} + void visit (HIR::LazyBooleanExpr &) override {} + void visit (HIR::TypeCastExpr &) override {} + void visit (HIR::AssignmentExpr &) override {} + void visit (HIR::CompoundAssignmentExpr &) override {} + void visit (HIR::GroupedExpr &) override {} + void visit (HIR::ArrayExpr &) override {} + void visit (HIR::ArrayIndexExpr &) override {} + void visit (HIR::TupleExpr &) override {} + void visit (HIR::TupleIndexExpr &) override {} + void visit (HIR::CallExpr &) override {} + void visit (HIR::MethodCallExpr &) override {} + void visit (HIR::FieldAccessExpr &) override {} + void visit (HIR::BlockExpr &) override {} + void visit (HIR::ContinueExpr &) override {} + void visit (HIR::BreakExpr &) override {} + void visit (HIR::RangeFromToExpr &) override {} + void visit (HIR::RangeFromExpr &) override {} + void visit (HIR::RangeToExpr &) override {} + void visit (HIR::RangeFullExpr &) override {} + void visit (HIR::RangeFromToInclExpr &) override {} + void visit (HIR::RangeToInclExpr &) override {} + void visit (HIR::ReturnExpr &) override {} + void visit (HIR::UnsafeBlockExpr &) override {} + void visit (HIR::LoopExpr &) 
override {} + void visit (HIR::WhileLoopExpr &) override {} + void visit (HIR::WhileLetLoopExpr &) override {} + void visit (HIR::ForLoopExpr &) override {} + void visit (HIR::IfExprConseqIfLet &) override {} + void visit (HIR::IfLetExpr &) override {} + void visit (HIR::IfLetExprConseqElse &) override {} + void visit (HIR::IfLetExprConseqIf &) override {} + void visit (HIR::IfLetExprConseqIfLet &) override {} + void visit (HIR::MatchExpr &) override {} + void visit (HIR::AwaitExpr &) override {} + void visit (HIR::AsyncBlockExpr &) override {} + +private: + CompileConditionalBlocks (Context *ctx, Bvariable *result) + : HIRCompileBase (ctx), translated (nullptr), result (result) + {} + + tree translated; + Bvariable *result; +}; + +class CompileExprWithBlock : public HIRCompileBase, + public HIR::HIRExpressionVisitor +{ +public: + static tree compile (HIR::ExprWithBlock *expr, Context *ctx, + Bvariable *result) + { + CompileExprWithBlock resolver (ctx, result); + expr->accept_vis (resolver); + return resolver.translated; + } + + void visit (HIR::IfExpr &expr) override + { + translated = CompileConditionalBlocks::compile (&expr, ctx, result); + } + + void visit (HIR::IfExprConseqElse &expr) override + { + translated = CompileConditionalBlocks::compile (&expr, ctx, result); + } + + void visit (HIR::IfExprConseqIf &expr) override + { + translated = CompileConditionalBlocks::compile (&expr, ctx, result); + } + + // Empty visit for unused Expression HIR nodes. 
+ void visit (HIR::PathInExpression &) override {} + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::ClosureExprInner &) override {} + void visit (HIR::ClosureExprInnerTyped &) override {} + void visit (HIR::StructExprFieldIdentifier &) override {} + void visit (HIR::StructExprFieldIdentifierValue &) override {} + void visit (HIR::StructExprFieldIndexValue &) override {} + void visit (HIR::StructExprStruct &) override {} + void visit (HIR::StructExprStructFields &) override {} + void visit (HIR::LiteralExpr &) override {} + void visit (HIR::BorrowExpr &) override {} + void visit (HIR::DereferenceExpr &) override {} + void visit (HIR::ErrorPropagationExpr &) override {} + void visit (HIR::NegationExpr &) override {} + void visit (HIR::ArithmeticOrLogicalExpr &) override {} + void visit (HIR::ComparisonExpr &) override {} + void visit (HIR::LazyBooleanExpr &) override {} + void visit (HIR::TypeCastExpr &) override {} + void visit (HIR::AssignmentExpr &) override {} + void visit (HIR::CompoundAssignmentExpr &) override {} + void visit (HIR::GroupedExpr &) override {} + void visit (HIR::ArrayExpr &) override {} + void visit (HIR::ArrayIndexExpr &) override {} + void visit (HIR::TupleExpr &) override {} + void visit (HIR::TupleIndexExpr &) override {} + void visit (HIR::CallExpr &) override {} + void visit (HIR::MethodCallExpr &) override {} + void visit (HIR::FieldAccessExpr &) override {} + void visit (HIR::BlockExpr &) override {} + void visit (HIR::ContinueExpr &) override {} + void visit (HIR::BreakExpr &) override {} + void visit (HIR::RangeFromToExpr &) override {} + void visit (HIR::RangeFromExpr &) override {} + void visit (HIR::RangeToExpr &) override {} + void visit (HIR::RangeFullExpr &) override {} + void visit (HIR::RangeFromToInclExpr &) override {} + void visit (HIR::RangeToInclExpr &) override {} + void visit (HIR::ReturnExpr &) override {} + void visit (HIR::UnsafeBlockExpr &) override {} + void visit (HIR::LoopExpr &) 
override {} + void visit (HIR::WhileLoopExpr &) override {} + void visit (HIR::WhileLetLoopExpr &) override {} + void visit (HIR::ForLoopExpr &) override {} + void visit (HIR::IfExprConseqIfLet &) override {} + void visit (HIR::IfLetExpr &) override {} + void visit (HIR::IfLetExprConseqElse &) override {} + void visit (HIR::IfLetExprConseqIf &) override {} + void visit (HIR::IfLetExprConseqIfLet &) override {} + void visit (HIR::MatchExpr &) override {} + void visit (HIR::AwaitExpr &) override {} + void visit (HIR::AsyncBlockExpr &) override {} + +private: + CompileExprWithBlock (Context *ctx, Bvariable *result) + : HIRCompileBase (ctx), translated (nullptr), result (result) + {} + + tree translated; + Bvariable *result; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_BLOCK diff --git a/gcc/rust/backend/rust-compile-context.cc b/gcc/rust/backend/rust-compile-context.cc new file mode 100644 index 00000000000..cb2addf6c21 --- /dev/null +++ b/gcc/rust/backend/rust-compile-context.cc @@ -0,0 +1,146 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 

#include "rust-compile-context.h"
#include "rust-compile-type.h"

namespace Rust {
namespace Compile {

Context::Context (::Backend *backend)
  : backend (backend), resolver (Resolver::Resolver::get ()),
    tyctx (Resolver::TypeCheckContext::get ()),
    mappings (Analysis::Mappings::get ()), mangler (Mangler ())
{
  setup_builtins ();
}

// Pre-compile every builtin primitive type so it is present in the
// compiled-type cache before any user code is lowered.
void
Context::setup_builtins ()
{
  auto builtins = resolver->get_builtin_types ();
  for (auto it = builtins.begin (); it != builtins.end (); it++)
    {
      HirId ref;
      bool ok = tyctx->lookup_type_by_node_id ((*it)->get_node_id (), &ref);
      rust_assert (ok);

      TyTy::BaseType *lookup;
      ok = tyctx->lookup_type (ref, &lookup);
      rust_assert (ok);

      TyTyResolveCompile::compile (this, lookup);
    }
}

// Structural hash over a GENERIC type node.  Used to key the compiled-type
// and main-variant caches so structurally identical types are deduplicated.
// Recurses through fields and pointees; unhandled codes hash only their
// TREE_CODE/name/attributes.
hashval_t
Context::type_hasher (tree type)
{
  inchash::hash hstate;

  hstate.add_int (TREE_CODE (type));

  if (TYPE_NAME (type))
    {
      hashval_t record_name_hash
	= IDENTIFIER_HASH_VALUE (DECL_NAME (TYPE_NAME (type)));
      hstate.add_object (record_name_hash);
    }

  for (tree t = TYPE_ATTRIBUTES (type); t; t = TREE_CHAIN (t))
    /* Just the identifier is adequate to distinguish.  */
    hstate.add_object (IDENTIFIER_HASH_VALUE (TREE_PURPOSE (t)));

  switch (TREE_CODE (type))
    {
    case METHOD_TYPE:
      hstate.add_object (TYPE_HASH (TYPE_METHOD_BASETYPE (type)));
      /* FALLTHROUGH. */
    case FUNCTION_TYPE:
      for (tree t = TYPE_ARG_TYPES (type); t; t = TREE_CHAIN (t))
	if (TREE_VALUE (t) != error_mark_node)
	  hstate.add_object (TYPE_HASH (TREE_VALUE (t)));
      break;

    case OFFSET_TYPE:
      hstate.add_object (TYPE_HASH (TYPE_OFFSET_BASETYPE (type)));
      break;

    case ARRAY_TYPE: {
	if (TYPE_DOMAIN (type))
	  hstate.add_object (TYPE_HASH (TYPE_DOMAIN (type)));
	if (!AGGREGATE_TYPE_P (TREE_TYPE (type)))
	  {
	    unsigned typeless = TYPE_TYPELESS_STORAGE (type);
	    hstate.add_object (typeless);
	  }
      }
      break;

    case INTEGER_TYPE: {
	tree t = TYPE_MAX_VALUE (type);
	if (!t)
	  t = TYPE_MIN_VALUE (type);
	for (int i = 0; i < TREE_INT_CST_NUNITS (t); i++)
	  hstate.add_object (TREE_INT_CST_ELT (t, i));
	break;
      }

    case REAL_TYPE:
    case FIXED_POINT_TYPE: {
	unsigned prec = TYPE_PRECISION (type);
	hstate.add_object (prec);
	break;
      }

    case VECTOR_TYPE:
      hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE: {
	for (tree t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
	  {
	    hashval_t name_hash = IDENTIFIER_HASH_VALUE (DECL_NAME (t));
	    hashval_t type_hash = type_hasher (TREE_TYPE (t));
	    hstate.add_object (name_hash);
	    hstate.add_object (type_hash);
	  }
      }
      break;

    case BOOLEAN_TYPE:
      break;

    case REFERENCE_TYPE:
    case POINTER_TYPE: {
	hashval_t type_hash = type_hasher (TREE_TYPE (type));
	hstate.add_object (type_hash);
      }
      break;

    default:
      break;
    }

  return hstate.end ();
}

} // namespace Compile
} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-context.h b/gcc/rust/backend/rust-compile-context.h
new file mode 100644
index 00000000000..096b65f8b39
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-context.h
@@ -0,0 +1,343 @@
// Copyright (C) 2020-2022 Free Software Foundation, Inc.

// This file is part of GCC.
+ +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_CONTEXT +#define RUST_COMPILE_CONTEXT + +#include "rust-system.h" +#include "rust-hir-map.h" +#include "rust-name-resolver.h" +#include "rust-hir-type-check.h" +#include "rust-backend.h" +#include "rust-hir-full.h" +#include "rust-mangle.h" +#include "rust-tree.h" + +namespace Rust { +namespace Compile { + +struct fncontext +{ + tree fndecl; + ::Bvariable *ret_addr; +}; + +class Context +{ +public: + Context (::Backend *backend); + + void setup_builtins (); + + bool lookup_compiled_types (tree t, tree *type) + { + hashval_t h = type_hasher (t); + auto it = compiled_type_map.find (h); + if (it == compiled_type_map.end ()) + return false; + + *type = it->second; + return true; + } + + tree insert_compiled_type (tree type) + { + hashval_t h = type_hasher (type); + auto it = compiled_type_map.find (h); + if (it != compiled_type_map.end ()) + return it->second; + + compiled_type_map.insert ({h, type}); + push_type (type); + return type; + } + + tree insert_main_variant (tree type) + { + hashval_t h = type_hasher (type); + auto it = main_variants.find (h); + if (it != main_variants.end ()) + return it->second; + + main_variants.insert ({h, type}); + return type; + } + + ::Backend *get_backend () { return backend; } + Resolver::Resolver *get_resolver () { return resolver; } + Resolver::TypeCheckContext *get_tyctx () { return tyctx; } + 
Analysis::Mappings *get_mappings () { return mappings; } + + void push_block (tree scope) + { + scope_stack.push_back (scope); + statements.push_back ({}); + } + + tree pop_block () + { + auto block = scope_stack.back (); + scope_stack.pop_back (); + + auto stmts = statements.back (); + statements.pop_back (); + + backend->block_add_statements (block, stmts); + + return block; + } + + tree peek_enclosing_scope () + { + if (scope_stack.size () == 0) + return nullptr; + + return scope_stack.back (); + } + + void add_statement_to_enclosing_scope (tree stmt) + { + statements.at (statements.size () - 2).push_back (stmt); + } + + void add_statement (tree stmt) { statements.back ().push_back (stmt); } + + void insert_var_decl (HirId id, ::Bvariable *decl) + { + compiled_var_decls[id] = decl; + } + + bool lookup_var_decl (HirId id, ::Bvariable **decl) + { + auto it = compiled_var_decls.find (id); + if (it == compiled_var_decls.end ()) + return false; + + *decl = it->second; + return true; + } + + void insert_function_decl (const TyTy::FnType *ref, tree fn) + { + auto id = ref->get_ty_ref (); + auto dId = ref->get_id (); + + rust_assert (compiled_fn_map.find (id) == compiled_fn_map.end ()); + compiled_fn_map[id] = fn; + + auto it = mono_fns.find (dId); + if (it == mono_fns.end ()) + mono_fns[dId] = {}; + + mono_fns[dId].push_back ({ref, fn}); + } + + bool lookup_function_decl (HirId id, tree *fn, DefId dId = UNKNOWN_DEFID, + const TyTy::BaseType *ref = nullptr) + { + // for for any monomorphized fns + if (ref != nullptr) + { + rust_assert (dId != UNKNOWN_DEFID); + + auto it = mono_fns.find (dId); + if (it == mono_fns.end ()) + return false; + + for (auto &e : mono_fns[dId]) + { + const TyTy::BaseType *r = e.first; + tree f = e.second; + if (ref->is_equal (*r)) + { + *fn = f; + return true; + } + } + return false; + } + + auto it = compiled_fn_map.find (id); + if (it == compiled_fn_map.end ()) + return false; + + *fn = it->second; + return true; + } + + void 
insert_const_decl (HirId id, tree expr) { compiled_consts[id] = expr; } + + bool lookup_const_decl (HirId id, tree *expr) + { + auto it = compiled_consts.find (id); + if (it == compiled_consts.end ()) + return false; + + *expr = it->second; + return true; + } + + void insert_label_decl (HirId id, tree label) { compiled_labels[id] = label; } + + bool lookup_label_decl (HirId id, tree *label) + { + auto it = compiled_labels.find (id); + if (it == compiled_labels.end ()) + return false; + + *label = it->second; + return true; + } + + void insert_pattern_binding (HirId id, tree binding) + { + implicit_pattern_bindings[id] = binding; + } + + bool lookup_pattern_binding (HirId id, tree *binding) + { + auto it = implicit_pattern_bindings.find (id); + if (it == implicit_pattern_bindings.end ()) + return false; + + *binding = it->second; + return true; + } + + void push_fn (tree fn, ::Bvariable *ret_addr) + { + fn_stack.push_back (fncontext{fn, ret_addr}); + } + void pop_fn () { fn_stack.pop_back (); } + + bool in_fn () { return fn_stack.size () != 0; } + + // Note: it is undefined behavior to call peek_fn () if fn_stack is empty. 
+ fncontext peek_fn () + { + rust_assert (!fn_stack.empty ()); + return fn_stack.back (); + } + + void push_type (tree t) { type_decls.push_back (t); } + void push_var (::Bvariable *v) { var_decls.push_back (v); } + void push_const (tree c) { const_decls.push_back (c); } + void push_function (tree f) { func_decls.push_back (f); } + + void write_to_backend () + { + backend->write_global_definitions (type_decls, const_decls, func_decls, + var_decls); + } + + bool function_completed (tree fn) + { + for (auto it = func_decls.begin (); it != func_decls.end (); it++) + { + tree i = (*it); + if (i == fn) + { + return true; + } + } + return false; + } + + void push_loop_context (Bvariable *var) { loop_value_stack.push_back (var); } + + Bvariable *peek_loop_context () { return loop_value_stack.back (); } + + Bvariable *pop_loop_context () + { + auto back = loop_value_stack.back (); + loop_value_stack.pop_back (); + return back; + } + + void push_loop_begin_label (tree label) + { + loop_begin_labels.push_back (label); + } + + tree peek_loop_begin_label () { return loop_begin_labels.back (); } + + tree pop_loop_begin_label () + { + tree pop = loop_begin_labels.back (); + loop_begin_labels.pop_back (); + return pop; + } + + void push_const_context (void) { const_context++; } + void pop_const_context (void) + { + if (const_context > 0) + const_context--; + } + bool const_context_p (void) { return (const_context > 0); } + + std::string mangle_item (const TyTy::BaseType *ty, + const Resolver::CanonicalPath &path) const + { + return mangler.mangle_item (ty, path); + } + + std::vector &get_type_decls () { return type_decls; } + std::vector<::Bvariable *> &get_var_decls () { return var_decls; } + std::vector &get_const_decls () { return const_decls; } + std::vector &get_func_decls () { return func_decls; } + + static hashval_t type_hasher (tree type); + +private: + ::Backend *backend; + Resolver::Resolver *resolver; + Resolver::TypeCheckContext *tyctx; + Analysis::Mappings 
*mappings; + Mangler mangler; + + // state + std::vector fn_stack; + std::map compiled_var_decls; + std::map compiled_type_map; + std::map compiled_fn_map; + std::map compiled_consts; + std::map compiled_labels; + std::vector<::std::vector> statements; + std::vector scope_stack; + std::vector<::Bvariable *> loop_value_stack; + std::vector loop_begin_labels; + std::map>> + mono_fns; + std::map implicit_pattern_bindings; + std::map main_variants; + + // To GCC middle-end + std::vector type_decls; + std::vector<::Bvariable *> var_decls; + std::vector const_decls; + std::vector func_decls; + + // Nonzero iff we are currently compiling something inside a constant context. + unsigned int const_context = 0; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_CONTEXT diff --git a/gcc/rust/backend/rust-compile-expr.cc b/gcc/rust/backend/rust-compile-expr.cc new file mode 100644 index 00000000000..865ad250f2c --- /dev/null +++ b/gcc/rust/backend/rust-compile-expr.cc @@ -0,0 +1,2764 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile-expr.h" +#include "rust-compile-struct-field-expr.h" +#include "rust-hir-trait-resolve.h" +#include "rust-hir-path-probe.h" +#include "rust-hir-type-bounds.h" +#include "rust-compile-pattern.h" +#include "rust-compile-resolve-path.h" +#include "rust-compile-block.h" +#include "rust-compile-implitem.h" +#include "rust-constexpr.h" + +#include "fold-const.h" +#include "realmpfr.h" +#include "convert.h" +#include "print-tree.h" + +namespace Rust { +namespace Compile { + +CompileExpr::CompileExpr (Context *ctx) + : HIRCompileBase (ctx), translated (error_mark_node) +{} + +tree +CompileExpr::Compile (HIR::Expr *expr, Context *ctx) +{ + CompileExpr compiler (ctx); + expr->accept_vis (compiler); + return compiler.translated; +} + +void +CompileExpr::visit (HIR::TupleIndexExpr &expr) +{ + HIR::Expr *tuple_expr = expr.get_tuple_expr ().get (); + TupleIndex index = expr.get_tuple_index (); + + tree receiver_ref = CompileExpr::Compile (tuple_expr, ctx); + + TyTy::BaseType *tuple_expr_ty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (tuple_expr->get_mappings ().get_hirid (), + &tuple_expr_ty); + rust_assert (ok); + + // do we need to add an indirect reference + if (tuple_expr_ty->get_kind () == TyTy::TypeKind::REF) + { + tree indirect = indirect_expression (receiver_ref, expr.get_locus ()); + receiver_ref = indirect; + } + + translated + = ctx->get_backend ()->struct_field_expression (receiver_ref, index, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::TupleExpr &expr) +{ + if (expr.is_unit ()) + { + translated = ctx->get_backend ()->unit_expression (); + return; + } + + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &tyty)) + { + rust_fatal_error (expr.get_locus (), + "did not resolve type for this TupleExpr"); + return; + } + + tree tuple_type = TyTyResolveCompile::compile (ctx, tyty); + rust_assert (tuple_type != nullptr); + + // this assumes all fields are in order 
from type resolution + std::vector vals; + for (auto &elem : expr.get_tuple_elems ()) + { + auto e = CompileExpr::Compile (elem.get (), ctx); + vals.push_back (e); + } + + translated + = ctx->get_backend ()->constructor_expression (tuple_type, false, vals, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::ReturnExpr &expr) +{ + auto fncontext = ctx->peek_fn (); + + std::vector retstmts; + if (expr.has_return_expr ()) + { + tree compiled_expr = CompileExpr::Compile (expr.return_expr.get (), ctx); + rust_assert (compiled_expr != nullptr); + + retstmts.push_back (compiled_expr); + } + + auto s = ctx->get_backend ()->return_statement (fncontext.fndecl, retstmts, + expr.get_locus ()); + ctx->add_statement (s); +} + +void +CompileExpr::visit (HIR::ArithmeticOrLogicalExpr &expr) +{ + auto op = expr.get_expr_type (); + auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx); + auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx); + + // this might be an operator overload situation lets check + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + if (is_op_overload) + { + auto lang_item_type + = Analysis::RustLangItem::OperatorToLangItem (expr.get_expr_type ()); + translated = resolve_operator_overload (lang_item_type, expr, lhs, rhs, + expr.get_lhs (), expr.get_rhs ()); + return; + } + + translated + = ctx->get_backend ()->arithmetic_or_logical_expression (op, lhs, rhs, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::CompoundAssignmentExpr &expr) +{ + auto op = expr.get_expr_type (); + auto lhs = CompileExpr::Compile (expr.get_left_expr ().get (), ctx); + auto rhs = CompileExpr::Compile (expr.get_right_expr ().get (), ctx); + + // this might be an operator overload situation lets check + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + if (is_op_overload) + { + auto 
lang_item_type + = Analysis::RustLangItem::CompoundAssignmentOperatorToLangItem ( + expr.get_expr_type ()); + auto compound_assignment + = resolve_operator_overload (lang_item_type, expr, lhs, rhs, + expr.get_left_expr ().get (), + expr.get_right_expr ().get ()); + ctx->add_statement (compound_assignment); + + return; + } + + auto operator_expr + = ctx->get_backend ()->arithmetic_or_logical_expression (op, lhs, rhs, + expr.get_locus ()); + tree assignment + = ctx->get_backend ()->assignment_statement (lhs, operator_expr, + expr.get_locus ()); + ctx->add_statement (assignment); +} + +void +CompileExpr::visit (HIR::NegationExpr &expr) +{ + auto op = expr.get_expr_type (); + auto negated_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx); + auto location = expr.get_locus (); + + // this might be an operator overload situation lets check + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + if (is_op_overload) + { + auto lang_item_type + = Analysis::RustLangItem::NegationOperatorToLangItem (op); + translated + = resolve_operator_overload (lang_item_type, expr, negated_expr, + nullptr, expr.get_expr ().get (), nullptr); + return; + } + + translated + = ctx->get_backend ()->negation_expression (op, negated_expr, location); +} + +void +CompileExpr::visit (HIR::ComparisonExpr &expr) +{ + auto op = expr.get_expr_type (); + auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx); + auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx); + auto location = expr.get_locus (); + + translated + = ctx->get_backend ()->comparison_expression (op, lhs, rhs, location); +} + +void +CompileExpr::visit (HIR::LazyBooleanExpr &expr) +{ + auto op = expr.get_expr_type (); + auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx); + auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx); + auto location = expr.get_locus (); + + translated + = ctx->get_backend ()->lazy_boolean_expression (op, lhs, 
rhs, location); +} + +void +CompileExpr::visit (HIR::TypeCastExpr &expr) +{ + TyTy::BaseType *type_to_cast_to_ty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &type_to_cast_to_ty)) + { + translated = error_mark_node; + return; + } + + TyTy::BaseType *casted_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type ( + expr.get_casted_expr ()->get_mappings ().get_hirid (), &casted_tyty)) + { + translated = error_mark_node; + return; + } + + auto type_to_cast_to = TyTyResolveCompile::compile (ctx, type_to_cast_to_ty); + auto casted_expr = CompileExpr::Compile (expr.get_casted_expr ().get (), ctx); + + std::vector *adjustments = nullptr; + bool ok = ctx->get_tyctx ()->lookup_cast_autoderef_mappings ( + expr.get_mappings ().get_hirid (), &adjustments); + if (ok) + { + casted_expr + = resolve_adjustements (*adjustments, casted_expr, expr.get_locus ()); + } + + translated + = type_cast_expression (type_to_cast_to, casted_expr, expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::IfExpr &expr) +{ + auto stmt = CompileConditionalBlocks::compile (&expr, ctx, nullptr); + ctx->add_statement (stmt); +} + +void +CompileExpr::visit (HIR::IfExprConseqElse &expr) +{ + TyTy::BaseType *if_type = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &if_type)) + { + rust_error_at (expr.get_locus (), + "failed to lookup type of IfExprConseqElse"); + return; + } + + Bvariable *tmp = NULL; + bool needs_temp = !if_type->is_unit (); + if (needs_temp) + { + fncontext fnctx = ctx->peek_fn (); + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree block_type = TyTyResolveCompile::compile (ctx, if_type); + + bool is_address_taken = false; + tree ret_var_stmt = nullptr; + tmp = ctx->get_backend ()->temporary_variable ( + fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken, + expr.get_locus (), &ret_var_stmt); + ctx->add_statement (ret_var_stmt); + } + + auto stmt = CompileConditionalBlocks::compile 
(&expr, ctx, tmp); + ctx->add_statement (stmt); + + if (tmp != NULL) + { + translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ()); + } +} + +void +CompileExpr::visit (HIR::IfExprConseqIf &expr) +{ + TyTy::BaseType *if_type = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &if_type)) + { + rust_error_at (expr.get_locus (), + "failed to lookup type of IfExprConseqElse"); + return; + } + + Bvariable *tmp = NULL; + bool needs_temp = !if_type->is_unit (); + if (needs_temp) + { + fncontext fnctx = ctx->peek_fn (); + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree block_type = TyTyResolveCompile::compile (ctx, if_type); + + bool is_address_taken = false; + tree ret_var_stmt = nullptr; + tmp = ctx->get_backend ()->temporary_variable ( + fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken, + expr.get_locus (), &ret_var_stmt); + ctx->add_statement (ret_var_stmt); + } + + auto stmt = CompileConditionalBlocks::compile (&expr, ctx, tmp); + ctx->add_statement (stmt); + + if (tmp != NULL) + { + translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ()); + } +} + +void +CompileExpr::visit (HIR::BlockExpr &expr) +{ + TyTy::BaseType *block_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &block_tyty)) + { + rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr"); + return; + } + + Bvariable *tmp = NULL; + bool needs_temp = !block_tyty->is_unit (); + if (needs_temp) + { + fncontext fnctx = ctx->peek_fn (); + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree block_type = TyTyResolveCompile::compile (ctx, block_tyty); + + bool is_address_taken = false; + tree ret_var_stmt = nullptr; + tmp = ctx->get_backend ()->temporary_variable ( + fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken, + expr.get_locus (), &ret_var_stmt); + ctx->add_statement (ret_var_stmt); + } + + auto block_stmt = CompileBlock::compile 
(&expr, ctx, tmp); + rust_assert (TREE_CODE (block_stmt) == BIND_EXPR); + ctx->add_statement (block_stmt); + + if (tmp != NULL) + { + translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ()); + } +} + +void +CompileExpr::visit (HIR::UnsafeBlockExpr &expr) +{ + expr.get_block_expr ()->accept_vis (*this); +} + +void +CompileExpr::visit (HIR::StructExprStruct &struct_expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (), + &tyty)) + { + rust_error_at (struct_expr.get_locus (), "unknown type"); + return; + } + + rust_assert (tyty->is_unit ()); + translated = ctx->get_backend ()->unit_expression (); +} + +void +CompileExpr::visit (HIR::StructExprStructFields &struct_expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (), + &tyty)) + { + rust_error_at (struct_expr.get_locus (), "unknown type"); + return; + } + + // it must be an ADT + rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT); + TyTy::ADTType *adt = static_cast (tyty); + + // what variant is it? 
+ int union_disriminator = struct_expr.union_index; + TyTy::VariantDef *variant = nullptr; + if (!adt->is_enum ()) + { + rust_assert (adt->number_of_variants () == 1); + variant = adt->get_variants ().at (0); + } + else + { + HirId variant_id; + bool ok = ctx->get_tyctx ()->lookup_variant_definition ( + struct_expr.get_struct_name ().get_mappings ().get_hirid (), + &variant_id); + rust_assert (ok); + + ok + = adt->lookup_variant_by_id (variant_id, &variant, &union_disriminator); + rust_assert (ok); + } + + // compile it + tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty); + + std::vector arguments; + if (adt->is_union ()) + { + rust_assert (struct_expr.get_fields ().size () == 1); + + // assignments are coercion sites so lets convert the rvalue if + // necessary + auto respective_field = variant->get_field_at_index (union_disriminator); + auto expected = respective_field->get_field_type (); + + // process arguments + auto &argument = struct_expr.get_fields ().at (0); + auto lvalue_locus + = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ()); + auto rvalue_locus = argument->get_locus (); + auto rvalue = CompileStructExprField::Compile (argument.get (), ctx); + + TyTy::BaseType *actual = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + argument->get_mappings ().get_hirid (), &actual); + + if (ok) + { + rvalue + = coercion_site (argument->get_mappings ().get_hirid (), rvalue, + actual, expected, lvalue_locus, rvalue_locus); + } + + // add it to the list + arguments.push_back (rvalue); + } + else + { + // this assumes all fields are in order from type resolution and if a + // base struct was specified those fields are filed via accesors + for (size_t i = 0; i < struct_expr.get_fields ().size (); i++) + { + // assignments are coercion sites so lets convert the rvalue if + // necessary + auto respective_field = variant->get_field_at_index (i); + auto expected = respective_field->get_field_type (); + + // process arguments + auto 
&argument = struct_expr.get_fields ().at (i); + auto lvalue_locus + = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ()); + auto rvalue_locus = argument->get_locus (); + auto rvalue = CompileStructExprField::Compile (argument.get (), ctx); + + TyTy::BaseType *actual = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + argument->get_mappings ().get_hirid (), &actual); + + // coerce it if required/possible see + // compile/torture/struct_base_init_1.rs + if (ok) + { + rvalue + = coercion_site (argument->get_mappings ().get_hirid (), rvalue, + actual, expected, lvalue_locus, rvalue_locus); + } + + // add it to the list + arguments.push_back (rvalue); + } + } + + // the constructor depends on whether this is actually an enum or not if + // its an enum we need to setup the discriminator + std::vector ctor_arguments; + if (adt->is_enum ()) + { + HIR::Expr *discrim_expr = variant->get_discriminant (); + tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx); + tree folded_discrim_expr = fold_expr (discrim_expr_node); + tree qualifier = folded_discrim_expr; + + ctor_arguments.push_back (qualifier); + } + for (auto &arg : arguments) + ctor_arguments.push_back (arg); + + translated = ctx->get_backend ()->constructor_expression ( + compiled_adt_type, adt->is_enum (), ctor_arguments, union_disriminator, + struct_expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::GroupedExpr &expr) +{ + translated = CompileExpr::Compile (expr.get_expr_in_parens ().get (), ctx); +} + +void +CompileExpr::visit (HIR::FieldAccessExpr &expr) +{ + HIR::Expr *receiver_expr = expr.get_receiver_expr ().get (); + tree receiver_ref = CompileExpr::Compile (receiver_expr, ctx); + + // resolve the receiver back to ADT type + TyTy::BaseType *receiver = nullptr; + if (!ctx->get_tyctx ()->lookup_type ( + expr.get_receiver_expr ()->get_mappings ().get_hirid (), &receiver)) + { + rust_error_at (expr.get_receiver_expr ()->get_locus (), + "unresolved type for receiver"); + return; 
+ } + + size_t field_index = 0; + if (receiver->get_kind () == TyTy::TypeKind::ADT) + { + TyTy::ADTType *adt = static_cast (receiver); + rust_assert (!adt->is_enum ()); + rust_assert (adt->number_of_variants () == 1); + + TyTy::VariantDef *variant = adt->get_variants ().at (0); + bool ok + = variant->lookup_field (expr.get_field_name (), nullptr, &field_index); + rust_assert (ok); + } + else if (receiver->get_kind () == TyTy::TypeKind::REF) + { + TyTy::ReferenceType *r = static_cast (receiver); + TyTy::BaseType *b = r->get_base (); + rust_assert (b->get_kind () == TyTy::TypeKind::ADT); + + TyTy::ADTType *adt = static_cast (b); + rust_assert (!adt->is_enum ()); + rust_assert (adt->number_of_variants () == 1); + + TyTy::VariantDef *variant = adt->get_variants ().at (0); + bool ok + = variant->lookup_field (expr.get_field_name (), nullptr, &field_index); + rust_assert (ok); + + tree indirect = indirect_expression (receiver_ref, expr.get_locus ()); + receiver_ref = indirect; + } + + translated + = ctx->get_backend ()->struct_field_expression (receiver_ref, field_index, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::QualifiedPathInExpression &expr) +{ + translated = ResolvePathRef::Compile (expr, ctx); +} + +void +CompileExpr::visit (HIR::PathInExpression &expr) +{ + translated = ResolvePathRef::Compile (expr, ctx); +} + +void +CompileExpr::visit (HIR::LoopExpr &expr) +{ + TyTy::BaseType *block_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &block_tyty)) + { + rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr"); + return; + } + + fncontext fnctx = ctx->peek_fn (); + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree block_type = TyTyResolveCompile::compile (ctx, block_tyty); + + bool is_address_taken = false; + tree ret_var_stmt = NULL_TREE; + Bvariable *tmp = ctx->get_backend ()->temporary_variable ( + fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken, + expr.get_locus 
(), &ret_var_stmt); + ctx->add_statement (ret_var_stmt); + ctx->push_loop_context (tmp); + + if (expr.has_loop_label ()) + { + HIR::LoopLabel &loop_label = expr.get_loop_label (); + tree label + = ctx->get_backend ()->label (fnctx.fndecl, + loop_label.get_lifetime ().get_name (), + loop_label.get_locus ()); + tree label_decl = ctx->get_backend ()->label_definition_statement (label); + ctx->add_statement (label_decl); + ctx->insert_label_decl ( + loop_label.get_lifetime ().get_mappings ().get_hirid (), label); + } + + tree loop_begin_label + = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ()); + tree loop_begin_label_decl + = ctx->get_backend ()->label_definition_statement (loop_begin_label); + ctx->add_statement (loop_begin_label_decl); + ctx->push_loop_begin_label (loop_begin_label); + + tree code_block + = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr); + tree loop_expr + = ctx->get_backend ()->loop_expression (code_block, expr.get_locus ()); + ctx->add_statement (loop_expr); + + ctx->pop_loop_context (); + translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ()); + + ctx->pop_loop_begin_label (); +} + +void +CompileExpr::visit (HIR::WhileLoopExpr &expr) +{ + fncontext fnctx = ctx->peek_fn (); + if (expr.has_loop_label ()) + { + HIR::LoopLabel &loop_label = expr.get_loop_label (); + tree label + = ctx->get_backend ()->label (fnctx.fndecl, + loop_label.get_lifetime ().get_name (), + loop_label.get_locus ()); + tree label_decl = ctx->get_backend ()->label_definition_statement (label); + ctx->add_statement (label_decl); + ctx->insert_label_decl ( + loop_label.get_lifetime ().get_mappings ().get_hirid (), label); + } + + std::vector locals; + Location start_location = expr.get_loop_block ()->get_locus (); + Location end_location = expr.get_loop_block ()->get_locus (); // FIXME + + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree loop_block + = ctx->get_backend ()->block (fnctx.fndecl, enclosing_scope, 
locals, + start_location, end_location); + ctx->push_block (loop_block); + + tree loop_begin_label + = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ()); + tree loop_begin_label_decl + = ctx->get_backend ()->label_definition_statement (loop_begin_label); + ctx->add_statement (loop_begin_label_decl); + ctx->push_loop_begin_label (loop_begin_label); + + tree condition + = CompileExpr::Compile (expr.get_predicate_expr ().get (), ctx); + tree exit_expr + = ctx->get_backend ()->exit_expression (condition, expr.get_locus ()); + ctx->add_statement (exit_expr); + + tree code_block_stmt + = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr); + rust_assert (TREE_CODE (code_block_stmt) == BIND_EXPR); + ctx->add_statement (code_block_stmt); + + ctx->pop_loop_begin_label (); + ctx->pop_block (); + + tree loop_expr + = ctx->get_backend ()->loop_expression (loop_block, expr.get_locus ()); + ctx->add_statement (loop_expr); +} + +void +CompileExpr::visit (HIR::BreakExpr &expr) +{ + if (expr.has_break_expr ()) + { + tree compiled_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx); + + Bvariable *loop_result_holder = ctx->peek_loop_context (); + tree result_reference + = ctx->get_backend ()->var_expression (loop_result_holder, + expr.get_expr ()->get_locus ()); + + tree assignment + = ctx->get_backend ()->assignment_statement (result_reference, + compiled_expr, + expr.get_locus ()); + ctx->add_statement (assignment); + } + + if (expr.has_label ()) + { + NodeId resolved_node_id = UNKNOWN_NODEID; + if (!ctx->get_resolver ()->lookup_resolved_label ( + expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id)) + { + rust_error_at ( + expr.get_label ().get_locus (), + "failed to resolve compiled label for label %s", + expr.get_label ().get_mappings ().as_string ().c_str ()); + return; + } + + HirId ref = UNKNOWN_HIRID; + if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref)) + { + rust_fatal_error (expr.get_locus (), 
"reverse lookup label failure"); + return; + } + + tree label = NULL_TREE; + if (!ctx->lookup_label_decl (ref, &label)) + { + rust_error_at (expr.get_label ().get_locus (), + "failed to lookup compiled label"); + return; + } + + tree goto_label + = ctx->get_backend ()->goto_statement (label, expr.get_locus ()); + ctx->add_statement (goto_label); + } + else + { + tree exit_expr = ctx->get_backend ()->exit_expression ( + ctx->get_backend ()->boolean_constant_expression (true), + expr.get_locus ()); + ctx->add_statement (exit_expr); + } +} + +void +CompileExpr::visit (HIR::ContinueExpr &expr) +{ + tree label = ctx->peek_loop_begin_label (); + if (expr.has_label ()) + { + NodeId resolved_node_id = UNKNOWN_NODEID; + if (!ctx->get_resolver ()->lookup_resolved_label ( + expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id)) + { + rust_error_at ( + expr.get_label ().get_locus (), + "failed to resolve compiled label for label %s", + expr.get_label ().get_mappings ().as_string ().c_str ()); + return; + } + + HirId ref = UNKNOWN_HIRID; + if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref)) + { + rust_fatal_error (expr.get_locus (), "reverse lookup label failure"); + return; + } + + if (!ctx->lookup_label_decl (ref, &label)) + { + rust_error_at (expr.get_label ().get_locus (), + "failed to lookup compiled label"); + return; + } + } + + translated = ctx->get_backend ()->goto_statement (label, expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::BorrowExpr &expr) +{ + tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx); + if (SLICE_TYPE_P (TREE_TYPE (main_expr))) + { + translated = main_expr; + return; + } + + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &tyty)) + return; + + translated = address_expression (main_expr, expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::DereferenceExpr &expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx 
()->lookup_type (expr.get_mappings ().get_hirid (), + &tyty)) + { + rust_fatal_error (expr.get_locus (), + "did not resolve type for this TupleExpr"); + return; + } + + tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx); + + // this might be an operator overload situation lets check + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + if (is_op_overload) + { + auto lang_item_type = Analysis::RustLangItem::ItemType::DEREF; + tree operator_overload_call + = resolve_operator_overload (lang_item_type, expr, main_expr, nullptr, + expr.get_expr ().get (), nullptr); + + // rust deref always returns a reference from this overload then we can + // actually do the indirection + main_expr = operator_overload_call; + } + + tree expected_type = TyTyResolveCompile::compile (ctx, tyty); + if (SLICE_TYPE_P (TREE_TYPE (main_expr)) && SLICE_TYPE_P (expected_type)) + { + translated = main_expr; + return; + } + + translated = indirect_expression (main_expr, expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::LiteralExpr &expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &tyty)) + return; + + switch (expr.get_lit_type ()) + { + case HIR::Literal::BOOL: + translated = compile_bool_literal (expr, tyty); + return; + + case HIR::Literal::INT: + translated = compile_integer_literal (expr, tyty); + return; + + case HIR::Literal::FLOAT: + translated = compile_float_literal (expr, tyty); + return; + + case HIR::Literal::CHAR: + translated = compile_char_literal (expr, tyty); + return; + + case HIR::Literal::BYTE: + translated = compile_byte_literal (expr, tyty); + return; + + case HIR::Literal::STRING: + translated = compile_string_literal (expr, tyty); + return; + + case HIR::Literal::BYTE_STRING: + translated = compile_byte_string_literal (expr, tyty); + return; + } +} + +void +CompileExpr::visit 
(HIR::AssignmentExpr &expr) +{ + auto lvalue = CompileExpr::Compile (expr.get_lhs (), ctx); + auto rvalue = CompileExpr::Compile (expr.get_rhs (), ctx); + + // assignments are coercion sites so lets convert the rvalue if necessary + TyTy::BaseType *expected = nullptr; + TyTy::BaseType *actual = nullptr; + + bool ok; + ok = ctx->get_tyctx ()->lookup_type ( + expr.get_lhs ()->get_mappings ().get_hirid (), &expected); + rust_assert (ok); + + ok = ctx->get_tyctx ()->lookup_type ( + expr.get_rhs ()->get_mappings ().get_hirid (), &actual); + rust_assert (ok); + + rvalue = coercion_site (expr.get_mappings ().get_hirid (), rvalue, actual, + expected, expr.get_lhs ()->get_locus (), + expr.get_rhs ()->get_locus ()); + + tree assignment + = ctx->get_backend ()->assignment_statement (lvalue, rvalue, + expr.get_locus ()); + + ctx->add_statement (assignment); +} + +// Helper for sort_tuple_patterns. +// Determine whether Patterns a and b are really the same pattern. +// FIXME: This is a nasty hack to avoid properly implementing a comparison +// for Patterns, which we really probably do want at some point. 
+static bool +patterns_mergeable (HIR::Pattern *a, HIR::Pattern *b) +{ + if (!a || !b) + return false; + + HIR::Pattern::PatternType pat_type = a->get_pattern_type (); + if (b->get_pattern_type () != pat_type) + return false; + + switch (pat_type) + { + case HIR::Pattern::PatternType::PATH: { + // FIXME: this is far too naive + HIR::PathPattern &aref = *static_cast (a); + HIR::PathPattern &bref = *static_cast (b); + if (aref.get_num_segments () != bref.get_num_segments ()) + return false; + + const auto &asegs = aref.get_segments (); + const auto &bsegs = bref.get_segments (); + for (size_t i = 0; i < asegs.size (); i++) + { + if (asegs[i].as_string () != bsegs[i].as_string ()) + return false; + } + return true; + } + break; + case HIR::Pattern::PatternType::LITERAL: { + HIR::LiteralPattern &aref = *static_cast (a); + HIR::LiteralPattern &bref = *static_cast (b); + return aref.get_literal ().is_equal (bref.get_literal ()); + } + break; + case HIR::Pattern::PatternType::IDENTIFIER: { + // TODO + } + break; + case HIR::Pattern::PatternType::WILDCARD: + return true; + break; + + // TODO + + default:; + } + return false; +} + +// A little container for rearranging the patterns and cases in a match +// expression while simplifying. +struct PatternMerge +{ + std::unique_ptr wildcard; + std::vector> heads; + std::vector> cases; +}; + +// Helper for simplify_tuple_match. +// For each tuple pattern in a given match, pull out the first elt of the +// tuple and construct a new MatchCase with the remaining tuple elts as the +// pattern. Return a mapping from each _unique_ first tuple element to a +// vec of cases for a new match. +// +// FIXME: This used to be a std::map>, but it doesn't +// actually work like we want - the Pattern includes an HIR ID, which is unique +// per Pattern object. This means we don't have a good means for comparing +// Patterns. It would probably be best to actually implement a means of +// properly comparing patterns, and then use an actual map. 
+// +static struct PatternMerge +sort_tuple_patterns (HIR::MatchExpr &expr) +{ + rust_assert (expr.get_scrutinee_expr ()->get_expression_type () + == HIR::Expr::ExprType::Tuple); + + struct PatternMerge result; + result.wildcard = nullptr; + result.heads = std::vector> (); + result.cases = std::vector> (); + + for (auto &match_case : expr.get_match_cases ()) + { + HIR::MatchArm &case_arm = match_case.get_arm (); + + // FIXME: Note we are only dealing with the first pattern in the arm. + // The patterns vector in the arm might hold many patterns, which are the + // patterns separated by the '|' token. Rustc abstracts these as "Or" + // patterns, and part of its simplification process is to get rid of them. + // We should get rid of the ORs too, maybe here or earlier than here? + auto pat = case_arm.get_patterns ()[0]->clone_pattern (); + + // Record wildcards so we can add them in inner matches. + if (pat->get_pattern_type () == HIR::Pattern::PatternType::WILDCARD) + { + // The *whole* pattern is a wild card (_). 
+ result.wildcard + = std::unique_ptr (new HIR::MatchCase (match_case)); + continue; + } + + rust_assert (pat->get_pattern_type () + == HIR::Pattern::PatternType::TUPLE); + + auto ref = *static_cast (pat.get ()); + + rust_assert (ref.has_tuple_pattern_items ()); + + auto items + = HIR::TuplePattern (ref).get_items ()->clone_tuple_pattern_items (); + if (items->get_pattern_type () + == HIR::TuplePatternItems::TuplePatternItemType::MULTIPLE) + { + auto items_ref + = *static_cast (items.get ()); + + // Pop the first pattern out + auto patterns = std::vector> (); + auto first = items_ref.get_patterns ()[0]->clone_pattern (); + for (auto p = items_ref.get_patterns ().begin () + 1; + p != items_ref.get_patterns ().end (); p++) + { + patterns.push_back ((*p)->clone_pattern ()); + } + + // if there is only one pattern left, don't make a tuple out of it + std::unique_ptr result_pattern; + if (patterns.size () == 1) + { + result_pattern = std::move (patterns[0]); + } + else + { + auto new_items = std::unique_ptr ( + new HIR::TuplePatternItemsMultiple (std::move (patterns))); + + // Construct a TuplePattern from the rest of the patterns + result_pattern = std::unique_ptr ( + new HIR::TuplePattern (ref.get_pattern_mappings (), + std::move (new_items), + ref.get_locus ())); + } + + // I don't know why we need to make foo separately here but + // using the { new_tuple } syntax in new_arm constructor does not + // compile. 
+ auto foo = std::vector> (); + foo.emplace_back (std::move (result_pattern)); + HIR::MatchArm new_arm (std::move (foo), Location (), nullptr, + AST::AttrVec ()); + + HIR::MatchCase new_case (match_case.get_mappings (), new_arm, + match_case.get_expr ()->clone_expr ()); + + bool pushed = false; + for (size_t i = 0; i < result.heads.size (); i++) + { + if (patterns_mergeable (result.heads[i].get (), first.get ())) + { + result.cases[i].push_back (new_case); + pushed = true; + } + } + + if (!pushed) + { + result.heads.push_back (std::move (first)); + result.cases.push_back ({new_case}); + } + } + else /* TuplePatternItemType::RANGED */ + { + // FIXME + gcc_unreachable (); + } + } + + return result; +} + +// Helper for CompileExpr::visit (HIR::MatchExpr). +// Given a MatchExpr where the scrutinee is some kind of tuple, build an +// equivalent match where only one element of the tuple is examined at a time. +// This resulting match can then be lowered to a SWITCH_EXPR tree directly. +// +// The approach is as follows: +// 1. Split the scrutinee and each pattern into the first (head) and the +// rest (tail). +// 2. Build a mapping of unique pattern heads to the cases (tail and expr) +// that shared that pattern head in the original match. +// (This is the job of sort_tuple_patterns ()). +// 3. For each unique pattern head, build a new MatchCase where the pattern +// is the unique head, and the expression is a new match where: +// - The scrutinee is the tail of the original scrutinee +// - The cases are are those built by the mapping in step 2, i.e. the +// tails of the patterns and the corresponing expressions from the +// original match expression. +// 4. Do this recursively for each inner match, until there is nothing more +// to simplify. +// 5. Build the resulting match which scrutinizes the head of the original +// scrutinee, using the cases built in step 3. 
+static HIR::MatchExpr
+simplify_tuple_match (HIR::MatchExpr &expr)
+{
+  if (expr.get_scrutinee_expr ()->get_expression_type ()
+      != HIR::Expr::ExprType::Tuple)
+    return expr;
+
+  auto ref
+    = *static_cast<HIR::TupleExpr *> (expr.get_scrutinee_expr ().get ());
+
+  auto &tail = ref.get_tuple_elems ();
+  rust_assert (tail.size () > 1);
+
+  auto head = std::move (tail[0]);
+  tail.erase (tail.begin (), tail.begin () + 1);
+
+  // e.g.
+  // match (tupA, tupB, tupC) {
+  //   (a1, b1, c1) => { blk1 },
+  //   (a2, b2, c2) => { blk2 },
+  //   (a1, b3, c3) => { blk3 },
+  // }
+  // tail = (tupB, tupC)
+  // head = tupA
+
+  // Make sure the tail is only a tuple if it consists of at least 2 elements.
+  std::unique_ptr<HIR::Expr> remaining;
+  if (tail.size () == 1)
+    remaining = std::move (tail[0]);
+  else
+    remaining = std::unique_ptr<HIR::Expr> (
+      new HIR::TupleExpr (ref.get_mappings (), std::move (tail),
+			  AST::AttrVec (), ref.get_outer_attrs (),
+			  ref.get_locus ()));
+
+  // e.g.
+  // a1 -> [(b1, c1) => { blk1 },
+  //        (b3, c3) => { blk3 }]
+  // a2 -> [(b2, c2) => { blk2 }]
+  struct PatternMerge map = sort_tuple_patterns (expr);
+
+  std::vector<HIR::MatchCase> cases;
+  // Construct the inner match for each unique first elt of the tuple
+  // patterns
+  for (size_t i = 0; i < map.heads.size (); i++)
+    {
+      auto inner_match_cases = map.cases[i];
+
+      // If there is a wildcard at the outer match level, then need to
+      // propagate the wildcard case into *every* inner match.
+      // FIXME: It is probably not correct to add this unconditionally, what if
+      // we have a pattern like (a, _, c)? Then there is already a wildcard in
+      // the inner matches, and having two will cause two 'default:' blocks
+      // which is an error.
+      if (map.wildcard != nullptr)
+	{
+	  inner_match_cases.push_back (*(map.wildcard.get ()));
+	}
+
+      // match (tupB, tupC) {
+      //   (b1, c1) => { blk1 },
+      //   (b3, c3) => { blk3 }
+      // }
+      HIR::MatchExpr inner_match (expr.get_mappings (),
+				  remaining->clone_expr (), inner_match_cases,
+				  AST::AttrVec (), expr.get_outer_attrs (),
+				  expr.get_locus ());
+
+      inner_match = simplify_tuple_match (inner_match);
+
+      auto outer_arm_pat = std::vector<std::unique_ptr<HIR::Pattern>> ();
+      outer_arm_pat.emplace_back (map.heads[i]->clone_pattern ());
+
+      HIR::MatchArm outer_arm (std::move (outer_arm_pat), expr.get_locus ());
+
+      // Need to move the inner match to the heap and put it in a unique_ptr to
+      // build the actual match case of the outer expression
+      // auto inner_expr = std::unique_ptr<HIR::Expr> (new HIR::MatchExpr
+      // (inner_match));
+      auto inner_expr = inner_match.clone_expr ();
+
+      // a1 => match (tupB, tupC) { ... }
+      HIR::MatchCase outer_case (expr.get_mappings (), outer_arm,
+				 std::move (inner_expr));
+
+      cases.push_back (outer_case);
+    }
+
+  // If there was a wildcard, make sure to include it at the outer match level
+  // too.
+  if (map.wildcard != nullptr)
+    {
+      cases.push_back (*(map.wildcard.get ()));
+    }
+
+  // match tupA {
+  //   a1 => match (tupB, tupC) {
+  //     (b1, c1) => { blk1 },
+  //     (b3, c3) => { blk3 }
+  //   }
+  //   a2 => match (tupB, tupC) {
+  //     (b2, c2) => { blk2 }
+  //   }
+  // }
+  HIR::MatchExpr outer_match (expr.get_mappings (), std::move (head), cases,
+			      AST::AttrVec (), expr.get_outer_attrs (),
+			      expr.get_locus ());
+
+  return outer_match;
+}
+
+// Helper for CompileExpr::visit (HIR::MatchExpr).
+// Check that the scrutinee of EXPR is a valid kind of expression to match on.
+// Return the TypeKind of the scrutinee if it is valid, or TyTy::TypeKind::ERROR
+// if not.
+static TyTy::TypeKind +check_match_scrutinee (HIR::MatchExpr &expr, Context *ctx) +{ + TyTy::BaseType *scrutinee_expr_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type ( + expr.get_scrutinee_expr ()->get_mappings ().get_hirid (), + &scrutinee_expr_tyty)) + { + return TyTy::TypeKind::ERROR; + } + + TyTy::TypeKind scrutinee_kind = scrutinee_expr_tyty->get_kind (); + rust_assert ((TyTy::is_primitive_type_kind (scrutinee_kind) + && scrutinee_kind != TyTy::TypeKind::NEVER) + || scrutinee_kind == TyTy::TypeKind::ADT + || scrutinee_kind == TyTy::TypeKind::TUPLE); + + if (scrutinee_kind == TyTy::TypeKind::ADT) + { + // this will need to change but for now the first pass implementation, + // lets assert this is the case + TyTy::ADTType *adt = static_cast (scrutinee_expr_tyty); + rust_assert (adt->is_enum ()); + rust_assert (adt->number_of_variants () > 0); + } + else if (scrutinee_kind == TyTy::TypeKind::FLOAT) + { + // FIXME: CASE_LABEL_EXPR does not support floating point types. + // Find another way to compile these. + rust_sorry_at (expr.get_locus (), + "match on floating-point types is not yet supported"); + } + + TyTy::BaseType *expr_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &expr_tyty)) + { + return TyTy::TypeKind::ERROR; + } + + return scrutinee_kind; +} + +void +CompileExpr::visit (HIR::MatchExpr &expr) +{ + // https://gcc.gnu.org/onlinedocs/gccint/Basic-Statements.html#Basic-Statements + // TODO + // SWITCH_ALL_CASES_P is true if the switch includes a default label or the + // case label ranges cover all possible values of the condition expression + + /* Switch expression. + + TREE_TYPE is the original type of the condition, before any + language required type conversions. It may be NULL, in which case + the original type and final types are assumed to be the same. + + Operand 0 is the expression used to perform the branch, + Operand 1 is the body of the switch, which probably contains + CASE_LABEL_EXPRs. 
It may also be NULL, in which case operand 2 + must not be NULL. */ + // DEFTREECODE (SWITCH_EXPR, "switch_expr", tcc_statement, 2) + + /* Used to represent a case label. + + Operand 0 is CASE_LOW. It may be NULL_TREE, in which case the label + is a 'default' label. + Operand 1 is CASE_HIGH. If it is NULL_TREE, the label is a simple + (one-value) case label. If it is non-NULL_TREE, the case is a range. + Operand 2 is CASE_LABEL, which has the corresponding LABEL_DECL. + Operand 3 is CASE_CHAIN. This operand is only used in tree-cfg.cc to + speed up the lookup of case labels which use a particular edge in + the control flow graph. */ + // DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", tcc_statement, 4) + + TyTy::TypeKind scrutinee_kind = check_match_scrutinee (expr, ctx); + if (scrutinee_kind == TyTy::TypeKind::ERROR) + { + translated = error_mark_node; + return; + } + + TyTy::BaseType *expr_tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &expr_tyty)) + { + translated = error_mark_node; + return; + } + + fncontext fnctx = ctx->peek_fn (); + Bvariable *tmp = NULL; + bool needs_temp = !expr_tyty->is_unit (); + if (needs_temp) + { + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree block_type = TyTyResolveCompile::compile (ctx, expr_tyty); + + bool is_address_taken = false; + tree ret_var_stmt = nullptr; + tmp = ctx->get_backend ()->temporary_variable ( + fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken, + expr.get_locus (), &ret_var_stmt); + ctx->add_statement (ret_var_stmt); + } + + // lets compile the scrutinee expression + tree match_scrutinee_expr + = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx); + + tree match_scrutinee_expr_qualifier_expr; + if (TyTy::is_primitive_type_kind (scrutinee_kind)) + { + match_scrutinee_expr_qualifier_expr = match_scrutinee_expr; + } + else if (scrutinee_kind == TyTy::TypeKind::ADT) + { + // need to access qualifier the field, if we use 
QUAL_UNION_TYPE this + // would be DECL_QUALIFIER i think. For now this will just access the + // first record field and its respective qualifier because it will always + // be set because this is all a big special union + tree scrutinee_first_record_expr + = ctx->get_backend ()->struct_field_expression ( + match_scrutinee_expr, 0, expr.get_scrutinee_expr ()->get_locus ()); + match_scrutinee_expr_qualifier_expr + = ctx->get_backend ()->struct_field_expression ( + scrutinee_first_record_expr, 0, + expr.get_scrutinee_expr ()->get_locus ()); + } + else if (scrutinee_kind == TyTy::TypeKind::TUPLE) + { + // match on tuple becomes a series of nested switches, with one level + // for each element of the tuple from left to right. + auto exprtype = expr.get_scrutinee_expr ()->get_expression_type (); + switch (exprtype) + { + case HIR::Expr::ExprType::Tuple: { + // Build an equivalent expression which is nicer to lower. + HIR::MatchExpr outer_match = simplify_tuple_match (expr); + + // We've rearranged the match into something that lowers better + // to GENERIC trees. + // For actually doing the lowering we need to compile the match + // we've just made. But we're half-way through compiling the + // original one. + // ... + // For now, let's just replace the original with the rearranged one + // we just made, and compile that instead. What could go wrong? :) + // + // FIXME: What about when we decide a temporary is needed above? + // We might have already pushed a statement for it that + // we no longer need. Probably need to rearrange the order + // of these steps. + expr = outer_match; + + scrutinee_kind = check_match_scrutinee (expr, ctx); + if (scrutinee_kind == TyTy::TypeKind::ERROR) + { + translated = error_mark_node; + return; + } + + // Now compile the scrutinee of the simplified match. + // FIXME: this part is duplicated from above. 
+ match_scrutinee_expr + = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx); + + if (TyTy::is_primitive_type_kind (scrutinee_kind)) + { + match_scrutinee_expr_qualifier_expr = match_scrutinee_expr; + } + else if (scrutinee_kind == TyTy::TypeKind::ADT) + { + // need to access qualifier the field, if we use QUAL_UNION_TYPE + // this would be DECL_QUALIFIER i think. For now this will just + // access the first record field and its respective qualifier + // because it will always be set because this is all a big + // special union + tree scrutinee_first_record_expr + = ctx->get_backend ()->struct_field_expression ( + match_scrutinee_expr, 0, + expr.get_scrutinee_expr ()->get_locus ()); + match_scrutinee_expr_qualifier_expr + = ctx->get_backend ()->struct_field_expression ( + scrutinee_first_record_expr, 0, + expr.get_scrutinee_expr ()->get_locus ()); + } + else + { + // FIXME: There are other cases, but it better not be a Tuple + gcc_unreachable (); + } + } + break; + + case HIR::Expr::ExprType::Path: { + // FIXME + gcc_unreachable (); + } + break; + + default: + gcc_unreachable (); + } + } + else + { + // FIXME: match on other types of expressions not yet implemented. 
+ gcc_unreachable (); + } + + // setup the end label so the cases can exit properly + tree fndecl = fnctx.fndecl; + Location end_label_locus = expr.get_locus (); // FIXME + tree end_label + = ctx->get_backend ()->label (fndecl, + "" /* empty creates an artificial label */, + end_label_locus); + tree end_label_decl_statement + = ctx->get_backend ()->label_definition_statement (end_label); + + // setup the switch-body-block + Location start_location; // FIXME + Location end_location; // FIXME + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree switch_body_block + = ctx->get_backend ()->block (fndecl, enclosing_scope, {}, start_location, + end_location); + ctx->push_block (switch_body_block); + + for (auto &kase : expr.get_match_cases ()) + { + // for now lets just get single pattern's working + HIR::MatchArm &kase_arm = kase.get_arm (); + rust_assert (kase_arm.get_patterns ().size () > 0); + + // generate implicit label + Location arm_locus = kase_arm.get_locus (); + tree case_label = ctx->get_backend ()->label ( + fndecl, "" /* empty creates an artificial label */, arm_locus); + + // setup the bindings for the block + for (auto &kase_pattern : kase_arm.get_patterns ()) + { + tree switch_kase_expr + = CompilePatternCaseLabelExpr::Compile (kase_pattern.get (), + case_label, ctx); + ctx->add_statement (switch_kase_expr); + + CompilePatternBindings::Compile (kase_pattern.get (), + match_scrutinee_expr, ctx); + } + + // compile the expr and setup the assignment if required when tmp != NULL + tree kase_expr_tree = CompileExpr::Compile (kase.get_expr ().get (), ctx); + if (tmp != NULL) + { + tree result_reference + = ctx->get_backend ()->var_expression (tmp, arm_locus); + tree assignment + = ctx->get_backend ()->assignment_statement (result_reference, + kase_expr_tree, + arm_locus); + ctx->add_statement (assignment); + } + + // go to end label + tree goto_end_label = build1_loc (arm_locus.gcc_location (), GOTO_EXPR, + void_type_node, end_label); + 
ctx->add_statement (goto_end_label); + } + + // setup the switch expression + tree match_body = ctx->pop_block (); + tree match_expr_stmt + = build2_loc (expr.get_locus ().gcc_location (), SWITCH_EXPR, + TREE_TYPE (match_scrutinee_expr_qualifier_expr), + match_scrutinee_expr_qualifier_expr, match_body); + ctx->add_statement (match_expr_stmt); + ctx->add_statement (end_label_decl_statement); + + if (tmp != NULL) + { + translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ()); + } +} + +void +CompileExpr::visit (HIR::CallExpr &expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type ( + expr.get_fnexpr ()->get_mappings ().get_hirid (), &tyty)) + { + rust_error_at (expr.get_locus (), "unknown type"); + return; + } + + // must be a tuple constructor + bool is_fn = tyty->get_kind () == TyTy::TypeKind::FNDEF + || tyty->get_kind () == TyTy::TypeKind::FNPTR; + bool is_adt_ctor = !is_fn; + if (is_adt_ctor) + { + rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT); + TyTy::ADTType *adt = static_cast (tyty); + tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty); + + // what variant is it? 
+ int union_disriminator = -1; + TyTy::VariantDef *variant = nullptr; + if (!adt->is_enum ()) + { + rust_assert (adt->number_of_variants () == 1); + variant = adt->get_variants ().at (0); + } + else + { + HirId variant_id; + bool ok = ctx->get_tyctx ()->lookup_variant_definition ( + expr.get_fnexpr ()->get_mappings ().get_hirid (), &variant_id); + rust_assert (ok); + + ok = adt->lookup_variant_by_id (variant_id, &variant, + &union_disriminator); + rust_assert (ok); + } + + // this assumes all fields are in order from type resolution and if a + // base struct was specified those fields are filed via accesors + std::vector arguments; + for (size_t i = 0; i < expr.get_arguments ().size (); i++) + { + auto &argument = expr.get_arguments ().at (i); + auto rvalue = CompileExpr::Compile (argument.get (), ctx); + + // assignments are coercion sites so lets convert the rvalue if + // necessary + auto respective_field = variant->get_field_at_index (i); + auto expected = respective_field->get_field_type (); + + TyTy::BaseType *actual = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + argument->get_mappings ().get_hirid (), &actual); + rust_assert (ok); + + // coerce it if required + Location lvalue_locus + = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ()); + Location rvalue_locus = argument->get_locus (); + rvalue + = coercion_site (argument->get_mappings ().get_hirid (), rvalue, + actual, expected, lvalue_locus, rvalue_locus); + + // add it to the list + arguments.push_back (rvalue); + } + + // the constructor depends on whether this is actually an enum or not if + // its an enum we need to setup the discriminator + std::vector ctor_arguments; + if (adt->is_enum ()) + { + HIR::Expr *discrim_expr = variant->get_discriminant (); + tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx); + tree folded_discrim_expr = fold_expr (discrim_expr_node); + tree qualifier = folded_discrim_expr; + + ctor_arguments.push_back (qualifier); + } + for (auto 
&arg : arguments) + ctor_arguments.push_back (arg); + + translated = ctx->get_backend ()->constructor_expression ( + compiled_adt_type, adt->is_enum (), ctor_arguments, union_disriminator, + expr.get_locus ()); + + return; + } + + auto get_parameter_tyty_at_index + = [] (const TyTy::BaseType *base, size_t index, + TyTy::BaseType **result) -> bool { + bool is_fn = base->get_kind () == TyTy::TypeKind::FNDEF + || base->get_kind () == TyTy::TypeKind::FNPTR; + rust_assert (is_fn); + + if (base->get_kind () == TyTy::TypeKind::FNPTR) + { + const TyTy::FnPtr *fn = static_cast (base); + *result = fn->param_at (index); + + return true; + } + + const TyTy::FnType *fn = static_cast (base); + auto param = fn->param_at (index); + *result = param.second; + + return true; + }; + + bool is_varadic = false; + if (tyty->get_kind () == TyTy::TypeKind::FNDEF) + { + const TyTy::FnType *fn = static_cast (tyty); + is_varadic = fn->is_varadic (); + } + + size_t required_num_args; + if (tyty->get_kind () == TyTy::TypeKind::FNDEF) + { + const TyTy::FnType *fn = static_cast (tyty); + required_num_args = fn->num_params (); + } + else + { + const TyTy::FnPtr *fn = static_cast (tyty); + required_num_args = fn->num_params (); + } + + std::vector args; + for (size_t i = 0; i < expr.get_arguments ().size (); i++) + { + auto &argument = expr.get_arguments ().at (i); + auto rvalue = CompileExpr::Compile (argument.get (), ctx); + + if (is_varadic && i >= required_num_args) + { + args.push_back (rvalue); + continue; + } + + // assignments are coercion sites so lets convert the rvalue if + // necessary + bool ok; + TyTy::BaseType *expected = nullptr; + ok = get_parameter_tyty_at_index (tyty, i, &expected); + rust_assert (ok); + + TyTy::BaseType *actual = nullptr; + ok = ctx->get_tyctx ()->lookup_type ( + argument->get_mappings ().get_hirid (), &actual); + rust_assert (ok); + + // coerce it if required + Location lvalue_locus + = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ()); + Location 
rvalue_locus = argument->get_locus (); + rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue, + actual, expected, lvalue_locus, rvalue_locus); + + // add it to the list + args.push_back (rvalue); + } + + // must be a call to a function + auto fn_address = CompileExpr::Compile (expr.get_fnexpr (), ctx); + translated = ctx->get_backend ()->call_expression (fn_address, args, nullptr, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::MethodCallExpr &expr) +{ + // method receiver + tree self = CompileExpr::Compile (expr.get_receiver ().get (), ctx); + + // lookup the resolved name + NodeId resolved_node_id = UNKNOWN_NODEID; + if (!ctx->get_resolver ()->lookup_resolved_name ( + expr.get_mappings ().get_nodeid (), &resolved_node_id)) + { + rust_error_at (expr.get_locus (), "failed to lookup resolved MethodCall"); + return; + } + + // reverse lookup + HirId ref; + if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref)) + { + rust_fatal_error (expr.get_locus (), "reverse lookup failure"); + return; + } + + // lookup the expected function type + TyTy::BaseType *lookup_fntype = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + expr.get_method_name ().get_mappings ().get_hirid (), &lookup_fntype); + rust_assert (ok); + rust_assert (lookup_fntype->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *fntype = static_cast (lookup_fntype); + + TyTy::BaseType *receiver = nullptr; + ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (), + &receiver); + rust_assert (ok); + + bool is_dyn_dispatch + = receiver->get_root ()->get_kind () == TyTy::TypeKind::DYNAMIC; + bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM; + if (is_generic_receiver) + { + TyTy::ParamType *p = static_cast (receiver); + receiver = p->resolve (); + } + + tree fn_expr = error_mark_node; + if (is_dyn_dispatch) + { + const TyTy::DynamicObjectType *dyn + = static_cast (receiver->get_root ()); + + std::vector arguments; + 
for (auto &arg : expr.get_arguments ()) + arguments.push_back (arg.get ()); + + fn_expr + = get_fn_addr_from_dyn (dyn, receiver, fntype, self, expr.get_locus ()); + self = get_receiver_from_dyn (dyn, receiver, fntype, self, + expr.get_locus ()); + } + else + { + // lookup compiled functions since it may have already been compiled + HIR::PathExprSegment method_name = expr.get_method_name (); + HIR::PathIdentSegment segment_name = method_name.get_segment (); + fn_expr + = resolve_method_address (fntype, ref, receiver, segment_name, + expr.get_mappings (), expr.get_locus ()); + } + + // lookup the autoderef mappings + HirId autoderef_mappings_id + = expr.get_receiver ()->get_mappings ().get_hirid (); + std::vector *adjustments = nullptr; + ok = ctx->get_tyctx ()->lookup_autoderef_mappings (autoderef_mappings_id, + &adjustments); + rust_assert (ok); + + // apply adjustments for the fn call + self = resolve_adjustements (*adjustments, self, + expr.get_receiver ()->get_locus ()); + + std::vector args; + args.push_back (self); // adjusted self + + // normal args + for (size_t i = 0; i < expr.get_arguments ().size (); i++) + { + auto &argument = expr.get_arguments ().at (i); + auto rvalue = CompileExpr::Compile (argument.get (), ctx); + + // assignments are coercion sites so lets convert the rvalue if + // necessary, offset from the already adjusted implicit self + bool ok; + TyTy::BaseType *expected = fntype->param_at (i + 1).second; + + TyTy::BaseType *actual = nullptr; + ok = ctx->get_tyctx ()->lookup_type ( + argument->get_mappings ().get_hirid (), &actual); + rust_assert (ok); + + // coerce it if required + Location lvalue_locus + = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ()); + Location rvalue_locus = argument->get_locus (); + rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue, + actual, expected, lvalue_locus, rvalue_locus); + + // add it to the list + args.push_back (rvalue); + } + + translated = ctx->get_backend 
()->call_expression (fn_expr, args, nullptr, + expr.get_locus ()); +} + +tree +CompileExpr::get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn, + TyTy::BaseType *receiver, + TyTy::FnType *fntype, tree receiver_ref, + Location expr_locus) +{ + size_t offs = 0; + const Resolver::TraitItemReference *ref = nullptr; + for (auto &bound : dyn->get_object_items ()) + { + const Resolver::TraitItemReference *item = bound.first; + auto t = item->get_tyty (); + rust_assert (t->get_kind () == TyTy::TypeKind::FNDEF); + auto ft = static_cast (t); + + if (ft->get_id () == fntype->get_id ()) + { + ref = item; + break; + } + offs++; + } + + if (ref == nullptr) + return error_mark_node; + + // get any indirection sorted out + if (receiver->get_kind () == TyTy::TypeKind::REF) + { + tree indirect = indirect_expression (receiver_ref, expr_locus); + receiver_ref = indirect; + } + + // cast it to the correct fntype + tree expected_fntype = TyTyResolveCompile::compile (ctx, fntype, true); + tree idx = build_int_cst (size_type_node, offs); + + tree vtable_ptr + = ctx->get_backend ()->struct_field_expression (receiver_ref, 1, + expr_locus); + tree vtable_array_access = build4_loc (expr_locus.gcc_location (), ARRAY_REF, + TREE_TYPE (TREE_TYPE (vtable_ptr)), + vtable_ptr, idx, NULL_TREE, NULL_TREE); + + tree vcall + = build3_loc (expr_locus.gcc_location (), OBJ_TYPE_REF, expected_fntype, + vtable_array_access, receiver_ref, idx); + + return vcall; +} + +tree +CompileExpr::get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn, + TyTy::BaseType *receiver, + TyTy::FnType *fntype, tree receiver_ref, + Location expr_locus) +{ + // get any indirection sorted out + if (receiver->get_kind () == TyTy::TypeKind::REF) + { + tree indirect = indirect_expression (receiver_ref, expr_locus); + receiver_ref = indirect; + } + + // access the offs + 1 for the fnptr and offs=0 for the reciever obj + return ctx->get_backend ()->struct_field_expression (receiver_ref, 0, + expr_locus); +} + +tree 
+CompileExpr::resolve_method_address (TyTy::FnType *fntype, HirId ref, + TyTy::BaseType *receiver, + HIR::PathIdentSegment &segment, + Analysis::NodeMapping expr_mappings, + Location expr_locus) +{ + // lookup compiled functions since it may have already been compiled + tree fn = NULL_TREE; + if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn)) + { + return address_expression (fn, expr_locus); + } + + // Now we can try and resolve the address since this might be a forward + // declared function, generic function which has not be compiled yet or + // its an not yet trait bound function + HIR::ImplItem *resolved_item + = ctx->get_mappings ()->lookup_hir_implitem (ref, nullptr); + if (resolved_item != nullptr) + { + if (!fntype->has_subsititions_defined ()) + return CompileInherentImplItem::Compile (resolved_item, ctx); + + return CompileInherentImplItem::Compile (resolved_item, ctx, fntype); + } + + // it might be resolved to a trait item + HIR::TraitItem *trait_item + = ctx->get_mappings ()->lookup_hir_trait_item (ref); + HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping ( + trait_item->get_mappings ().get_hirid ()); + + Resolver::TraitReference *trait_ref + = &Resolver::TraitReference::error_node (); + bool ok = ctx->get_tyctx ()->lookup_trait_reference ( + trait->get_mappings ().get_defid (), &trait_ref); + rust_assert (ok); + + // the type resolver can only resolve type bounds to their trait + // item so its up to us to figure out if this path should resolve + // to an trait-impl-block-item or if it can be defaulted to the + // trait-impl-item's definition + + auto root = receiver->get_root (); + std::vector candidates + = Resolver::PathProbeType::Probe (root, segment, true /* probe_impls */, + false /* probe_bounds */, + true /* ignore_mandatory_trait_items */); + if (candidates.size () == 0) + { + // this means we are defaulting back to the trait_item if + // possible + Resolver::TraitItemReference *trait_item_ref = nullptr; + bool ok = 
trait_ref->lookup_hir_trait_item (*trait_item, &trait_item_ref); + rust_assert (ok); // found + rust_assert (trait_item_ref->is_optional ()); // has definition + + // FIXME Optional means it has a definition and an associated + // block which can be a default implementation, if it does not + // contain an implementation we should actually return + // error_mark_node + + return CompileTraitItem::Compile (trait_item_ref->get_hir_trait_item (), + ctx, fntype, true, expr_locus); + } + else + { + // FIXME this will be a case to return error_mark_node, there is + // an error scenario where a Trait Foo has a method Bar, but this + // receiver does not implement this trait or has an incompatible + // implementation and we should just return error_mark_node + + rust_assert (candidates.size () == 1); + auto &candidate = candidates.at (0); + rust_assert (candidate.is_impl_candidate ()); + rust_assert (candidate.ty->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *candidate_call = static_cast (candidate.ty); + + HIR::ImplItem *impl_item = candidate.item.impl.impl_item; + if (!candidate_call->has_subsititions_defined ()) + return CompileInherentImplItem::Compile (impl_item, ctx); + + TyTy::BaseType *monomorphized = candidate_call; + if (candidate_call->needs_generic_substitutions ()) + { + TyTy::BaseType *infer_impl_call + = candidate_call->infer_substitions (expr_locus); + monomorphized = infer_impl_call->unify (fntype); + } + + return CompileInherentImplItem::Compile (impl_item, ctx, monomorphized); + } +} + +tree +CompileExpr::resolve_operator_overload ( + Analysis::RustLangItem::ItemType lang_item_type, HIR::OperatorExprMeta expr, + tree lhs, tree rhs, HIR::Expr *lhs_expr, HIR::Expr *rhs_expr) +{ + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + rust_assert (is_op_overload); + + // lookup the resolved name + NodeId resolved_node_id = UNKNOWN_NODEID; + bool ok = 
ctx->get_resolver ()->lookup_resolved_name ( + expr.get_mappings ().get_nodeid (), &resolved_node_id); + rust_assert (ok); + + // reverse lookup + HirId ref; + ok = ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref); + rust_assert (ok); + + TyTy::BaseType *receiver = nullptr; + ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (), + &receiver); + rust_assert (ok); + + bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM; + if (is_generic_receiver) + { + TyTy::ParamType *p = static_cast (receiver); + receiver = p->resolve (); + } + + // lookup compiled functions since it may have already been compiled + HIR::PathIdentSegment segment_name ( + Analysis::RustLangItem::ToString (lang_item_type)); + tree fn_expr + = resolve_method_address (fntype, ref, receiver, segment_name, + expr.get_mappings (), expr.get_locus ()); + + // lookup the autoderef mappings + std::vector *adjustments = nullptr; + ok = ctx->get_tyctx ()->lookup_autoderef_mappings ( + expr.get_lvalue_mappings ().get_hirid (), &adjustments); + rust_assert (ok); + + // apply adjustments for the fn call + tree self = resolve_adjustements (*adjustments, lhs, lhs_expr->get_locus ()); + + std::vector args; + args.push_back (self); // adjusted self + if (rhs != nullptr) // can be null for negation_expr (unary ones) + args.push_back (rhs); + + return ctx->get_backend ()->call_expression (fn_expr, args, nullptr, + expr.get_locus ()); +} + +tree +CompileExpr::compile_bool_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::BOOL); + + const auto literal_value = expr.get_literal (); + bool bval = literal_value.as_string ().compare ("true") == 0; + return ctx->get_backend ()->boolean_constant_expression (bval); +} + +tree +CompileExpr::compile_integer_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::INT); + const auto 
literal_value = expr.get_literal (); + + tree type = TyTyResolveCompile::compile (ctx, tyty); + + mpz_t ival; + if (mpz_init_set_str (ival, literal_value.as_string ().c_str (), 10) != 0) + { + rust_error_at (expr.get_locus (), "bad number in literal"); + return error_mark_node; + } + + mpz_t type_min; + mpz_t type_max; + mpz_init (type_min); + mpz_init (type_max); + get_type_static_bounds (type, type_min, type_max); + + if (mpz_cmp (ival, type_min) < 0 || mpz_cmp (ival, type_max) > 0) + { + rust_error_at (expr.get_locus (), + "integer overflows the respective type %<%s%>", + tyty->get_name ().c_str ()); + return error_mark_node; + } + return double_int_to_tree (type, mpz_get_double_int (type, ival, true)); +} + +tree +CompileExpr::compile_float_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::FLOAT); + const auto literal_value = expr.get_literal (); + + mpfr_t fval; + if (mpfr_init_set_str (fval, literal_value.as_string ().c_str (), 10, + MPFR_RNDN) + != 0) + { + rust_error_at (expr.get_locus (), "bad number in literal"); + return error_mark_node; + } + + tree type = TyTyResolveCompile::compile (ctx, tyty); + + // taken from: + // see go/gofrontend/expressions.cc:check_float_type + mpfr_exp_t exp = mpfr_get_exp (fval); + bool real_value_overflow = exp > TYPE_PRECISION (type); + + REAL_VALUE_TYPE r1; + real_from_mpfr (&r1, fval, type, GMP_RNDN); + REAL_VALUE_TYPE r2; + real_convert (&r2, TYPE_MODE (type), &r1); + + tree real_value = build_real (type, r2); + if (TREE_OVERFLOW (real_value) || real_value_overflow) + { + rust_error_at (expr.get_locus (), + "decimal overflows the respective type %<%s%>", + tyty->get_name ().c_str ()); + return error_mark_node; + } + + return real_value; +} + +tree +CompileExpr::compile_char_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::CHAR); + const auto literal_value = 
expr.get_literal (); + + // FIXME needs wchar_t + char c = literal_value.as_string ().c_str ()[0]; + return ctx->get_backend ()->wchar_constant_expression (c); +} + +tree +CompileExpr::compile_byte_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::BYTE); + const auto literal_value = expr.get_literal (); + + tree type = TyTyResolveCompile::compile (ctx, tyty); + char c = literal_value.as_string ().c_str ()[0]; + return build_int_cst (type, c); +} + +tree +CompileExpr::compile_string_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + tree fat_pointer = TyTyResolveCompile::compile (ctx, tyty); + + rust_assert (expr.get_lit_type () == HIR::Literal::STRING); + const auto literal_value = expr.get_literal (); + + auto base = ctx->get_backend ()->string_constant_expression ( + literal_value.as_string ()); + tree data = address_expression (base, expr.get_locus ()); + + TyTy::BaseType *usize = nullptr; + bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize); + rust_assert (ok); + tree type = TyTyResolveCompile::compile (ctx, usize); + + mpz_t ival; + mpz_init_set_ui (ival, literal_value.as_string ().size ()); + tree size = double_int_to_tree (type, mpz_get_double_int (type, ival, true)); + + return ctx->get_backend ()->constructor_expression (fat_pointer, false, + {data, size}, -1, + expr.get_locus ()); +} + +tree +CompileExpr::compile_byte_string_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty) +{ + rust_assert (expr.get_lit_type () == HIR::Literal::BYTE_STRING); + + // the type here is &[ty; capacity] + rust_assert (tyty->get_kind () == TyTy::TypeKind::REF); + const auto ref_tyty = static_cast (tyty); + auto base_tyty = ref_tyty->get_base (); + rust_assert (base_tyty->get_kind () == TyTy::TypeKind::ARRAY); + auto array_tyty = static_cast (base_tyty); + + std::string value_str = expr.get_literal ().as_string (); + std::vector vals; + std::vector 
indexes; + for (size_t i = 0; i < value_str.size (); i++) + { + char b = value_str.at (i); + tree bb = ctx->get_backend ()->char_constant_expression (b); + vals.push_back (bb); + indexes.push_back (i); + } + + tree array_type = TyTyResolveCompile::compile (ctx, array_tyty); + tree constructed + = ctx->get_backend ()->array_constructor_expression (array_type, indexes, + vals, + expr.get_locus ()); + + return address_expression (constructed, expr.get_locus ()); +} + +tree +CompileExpr::type_cast_expression (tree type_to_cast_to, tree expr_tree, + Location location) +{ + if (type_to_cast_to == error_mark_node || expr_tree == error_mark_node + || TREE_TYPE (expr_tree) == error_mark_node) + return error_mark_node; + + if (ctx->get_backend ()->type_size (type_to_cast_to) == 0 + || TREE_TYPE (expr_tree) == void_type_node) + { + // Do not convert zero-sized types. + return expr_tree; + } + else if (TREE_CODE (type_to_cast_to) == INTEGER_TYPE) + { + tree cast = fold (convert_to_integer (type_to_cast_to, expr_tree)); + // FIXME check for TREE_OVERFLOW? + return cast; + } + else if (TREE_CODE (type_to_cast_to) == REAL_TYPE) + { + tree cast = fold (convert_to_real (type_to_cast_to, expr_tree)); + // FIXME + // We might need to check that the tree is MAX val and thusly saturate it + // to inf. 
we can get the bounds and check the value if it's >= or <= to + // the min and max bounds + // + // https://github.com/Rust-GCC/gccrs/issues/635 + return cast; + } + else if (TREE_CODE (type_to_cast_to) == COMPLEX_TYPE) + { + return fold (convert_to_complex (type_to_cast_to, expr_tree)); + } + else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE + && TREE_CODE (TREE_TYPE (expr_tree)) == INTEGER_TYPE) + { + return fold (convert_to_pointer (type_to_cast_to, expr_tree)); + } + else if (TREE_CODE (type_to_cast_to) == RECORD_TYPE + || TREE_CODE (type_to_cast_to) == ARRAY_TYPE) + { + return fold_build1_loc (location.gcc_location (), VIEW_CONVERT_EXPR, + type_to_cast_to, expr_tree); + } + else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE + && SLICE_TYPE_P (TREE_TYPE (expr_tree))) + { + // returning a raw cast using NOP_EXPR seems to result in an ICE: + // + // Analyzing compilation unit + // Performing interprocedural optimizations + // <*free_lang_data> {heap 2644k} {heap 2644k} + // {heap 2644k} {heap 2644k}during + // GIMPLE pass: cddce + // In function ‘*T::as_ptr’: + // rust1: internal compiler error: in propagate_necessity, at + // tree-ssa-dce.cc:984 0x1d5b43e propagate_necessity + // ../../gccrs/gcc/tree-ssa-dce.cc:984 + // 0x1d5e180 perform_tree_ssa_dce + // ../../gccrs/gcc/tree-ssa-dce.cc:1876 + // 0x1d5e2c8 tree_ssa_cd_dce + // ../../gccrs/gcc/tree-ssa-dce.cc:1920 + // 0x1d5e49a execute + // ../../gccrs/gcc/tree-ssa-dce.cc:1992 + + // this is returning the direct raw pointer of the slice and assumes a very + // specific layout + return ctx->get_backend ()->struct_field_expression (expr_tree, 0, + location); + } + + return fold_convert_loc (location.gcc_location (), type_to_cast_to, + expr_tree); +} + +void +CompileExpr::visit (HIR::ArrayExpr &expr) +{ + TyTy::BaseType *tyty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), + &tyty)) + { + rust_fatal_error (expr.get_locus (), + "did not resolve type for this array expr"); + 
return; + } + + tree array_type = TyTyResolveCompile::compile (ctx, tyty); + if (TREE_CODE (array_type) != ARRAY_TYPE) + { + translated = error_mark_node; + return; + } + + rust_assert (tyty->get_kind () == TyTy::TypeKind::ARRAY); + const TyTy::ArrayType &array_tyty + = static_cast (*tyty); + + HIR::ArrayElems &elements = *expr.get_internal_elements (); + switch (elements.get_array_expr_type ()) + { + case HIR::ArrayElems::ArrayExprType::VALUES: { + HIR::ArrayElemsValues &elems + = static_cast (elements); + translated + = array_value_expr (expr.get_locus (), array_tyty, array_type, elems); + } + return; + + case HIR::ArrayElems::ArrayExprType::COPIED: + HIR::ArrayElemsCopied &elems + = static_cast (elements); + translated + = array_copied_expr (expr.get_locus (), array_tyty, array_type, elems); + } +} + +tree +CompileExpr::array_value_expr (Location expr_locus, + const TyTy::ArrayType &array_tyty, + tree array_type, HIR::ArrayElemsValues &elems) +{ + std::vector indexes; + std::vector constructor; + size_t i = 0; + for (auto &elem : elems.get_values ()) + { + tree translated_expr = CompileExpr::Compile (elem.get (), ctx); + constructor.push_back (translated_expr); + indexes.push_back (i++); + } + + return ctx->get_backend ()->array_constructor_expression (array_type, indexes, + constructor, + expr_locus); +} + +tree +CompileExpr::array_copied_expr (Location expr_locus, + const TyTy::ArrayType &array_tyty, + tree array_type, HIR::ArrayElemsCopied &elems) +{ + // see gcc/cp/typeck2.cc:1369-1401 + gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE); + tree domain = TYPE_DOMAIN (array_type); + if (!domain) + return error_mark_node; + + if (!TREE_CONSTANT (TYPE_MAX_VALUE (domain))) + { + rust_error_at (expr_locus, "non const capacity domain %qT", array_type); + return error_mark_node; + } + + tree capacity_expr = CompileExpr::Compile (elems.get_num_copies_expr (), ctx); + if (!TREE_CONSTANT (capacity_expr)) + { + rust_error_at (expr_locus, "non const num copies %qT", 
array_type); + return error_mark_node; + } + + // get the compiled value + tree translated_expr = CompileExpr::Compile (elems.get_elem_to_copy (), ctx); + + tree max_domain = TYPE_MAX_VALUE (domain); + tree min_domain = TYPE_MIN_VALUE (domain); + + auto max = wi::to_offset (max_domain); + auto min = wi::to_offset (min_domain); + auto precision = TYPE_PRECISION (TREE_TYPE (domain)); + auto sign = TYPE_SIGN (TREE_TYPE (domain)); + unsigned HOST_WIDE_INT len + = wi::ext (max - min + 1, precision, sign).to_uhwi (); + + // In a const context we must initialize the entire array, which entails + // allocating for each element. If the user wants a huge array, we will OOM + // and die horribly. + if (ctx->const_context_p ()) + { + size_t idx = 0; + std::vector indexes; + std::vector constructor; + for (unsigned HOST_WIDE_INT i = 0; i < len; i++) + { + constructor.push_back (translated_expr); + indexes.push_back (idx++); + } + + return ctx->get_backend ()->array_constructor_expression (array_type, + indexes, + constructor, + expr_locus); + } + + else + { + // Create a new block scope in which to initialize the array + tree fndecl = NULL_TREE; + if (ctx->in_fn ()) + fndecl = ctx->peek_fn ().fndecl; + + std::vector locals; + tree enclosing_scope = ctx->peek_enclosing_scope (); + tree init_block + = ctx->get_backend ()->block (fndecl, enclosing_scope, locals, + expr_locus, expr_locus); + ctx->push_block (init_block); + + tree tmp; + tree stmts + = ctx->get_backend ()->array_initializer (fndecl, init_block, + array_type, capacity_expr, + translated_expr, &tmp, + expr_locus); + ctx->add_statement (stmts); + + tree block = ctx->pop_block (); + + // The result is a compound expression which creates a temporary array, + // initializes all the elements in a loop, and then yields the array. 
+ return ctx->get_backend ()->compound_expression (block, tmp, expr_locus); + } +} + +tree +HIRCompileBase::resolve_adjustements ( + std::vector &adjustments, tree expression, + Location locus) +{ + tree e = expression; + for (auto &adjustment : adjustments) + { + switch (adjustment.get_type ()) + { + case Resolver::Adjustment::AdjustmentType::ERROR: + return error_mark_node; + + case Resolver::Adjustment::AdjustmentType::IMM_REF: + case Resolver::Adjustment::AdjustmentType::MUT_REF: { + if (!SLICE_TYPE_P (TREE_TYPE (e))) + { + e = address_expression (e, locus); + } + } + break; + + case Resolver::Adjustment::AdjustmentType::DEREF: + case Resolver::Adjustment::AdjustmentType::DEREF_MUT: + e = resolve_deref_adjustment (adjustment, e, locus); + break; + + case Resolver::Adjustment::AdjustmentType::INDIRECTION: + e = resolve_indirection_adjustment (adjustment, e, locus); + break; + + case Resolver::Adjustment::AdjustmentType::UNSIZE: + e = resolve_unsized_adjustment (adjustment, e, locus); + break; + } + } + + return e; +} + +tree +HIRCompileBase::resolve_deref_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus) +{ + rust_assert (adjustment.is_deref_adjustment () + || adjustment.is_deref_mut_adjustment ()); + rust_assert (adjustment.has_operator_overload ()); + + TyTy::FnType *lookup = adjustment.get_deref_operator_fn (); + HIR::ImplItem *resolved_item = adjustment.get_deref_hir_item (); + + tree fn_address = error_mark_node; + if (!lookup->has_subsititions_defined ()) + fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, nullptr, + true, locus); + else + fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, lookup, + true, locus); + + // does it need a reference to call + tree adjusted_argument = expression; + bool needs_borrow = adjustment.get_deref_adjustment_type () + != Resolver::Adjustment::AdjustmentType::ERROR; + if (needs_borrow) + { + adjusted_argument = address_expression (expression, locus); + } + + 
// make the call + return ctx->get_backend ()->call_expression (fn_address, {adjusted_argument}, + nullptr, locus); +} + +tree +HIRCompileBase::resolve_indirection_adjustment ( + Resolver::Adjustment &adjustment, tree expression, Location locus) +{ + return indirect_expression (expression, locus); +} + +tree +HIRCompileBase::resolve_unsized_adjustment (Resolver::Adjustment &adjustment, + tree expression, Location locus) +{ + bool expect_slice + = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::SLICE; + bool expect_dyn + = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::DYNAMIC; + + // assumes this is an array + tree expr_type = TREE_TYPE (expression); + if (expect_slice) + { + rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE); + return resolve_unsized_slice_adjustment (adjustment, expression, locus); + } + + rust_assert (expect_dyn); + return resolve_unsized_dyn_adjustment (adjustment, expression, locus); +} + +tree +HIRCompileBase::resolve_unsized_slice_adjustment ( + Resolver::Adjustment &adjustment, tree expression, Location locus) +{ + // assumes this is an array + tree expr_type = TREE_TYPE (expression); + rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE); + + // takes an array and returns a fat-pointer so this becomes a constructor + // expression + rust_assert (adjustment.get_expected ()->get_kind () + == TyTy::TypeKind::SLICE); + tree fat_pointer + = TyTyResolveCompile::compile (ctx, adjustment.get_expected ()); + + // make a constructor for this + tree data = address_expression (expression, locus); + + // fetch the size from the domain + tree domain = TYPE_DOMAIN (expr_type); + unsigned HOST_WIDE_INT array_size + = wi::ext (wi::to_offset (TYPE_MAX_VALUE (domain)) + - wi::to_offset (TYPE_MIN_VALUE (domain)) + 1, + TYPE_PRECISION (TREE_TYPE (domain)), + TYPE_SIGN (TREE_TYPE (domain))) + .to_uhwi (); + tree size = build_int_cst (size_type_node, array_size); + + return ctx->get_backend ()->constructor_expression (fat_pointer, false, + 
{data, size}, -1, locus); +} + +tree +HIRCompileBase::resolve_unsized_dyn_adjustment ( + Resolver::Adjustment &adjustment, tree expression, Location locus) +{ + tree rvalue = expression; + Location rvalue_locus = locus; + + const TyTy::BaseType *actual = adjustment.get_actual (); + const TyTy::BaseType *expected = adjustment.get_expected (); + + const TyTy::DynamicObjectType *dyn + = static_cast (expected); + + rust_debug ("resolve_unsized_dyn_adjustment actual={%s} dyn={%s}", + actual->debug_str ().c_str (), dyn->debug_str ().c_str ()); + + return coerce_to_dyn_object (rvalue, actual, dyn, rvalue_locus); +} + +void +CompileExpr::visit (HIR::RangeFromToExpr &expr) +{ + tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx); + tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx); + if (from == error_mark_node || to == error_mark_node) + { + translated = error_mark_node; + return; + } + + TyTy::BaseType *tyty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty); + rust_assert (ok); + + tree adt = TyTyResolveCompile::compile (ctx, tyty); + + // make the constructor + translated + = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::RangeFromExpr &expr) +{ + tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx); + if (from == error_mark_node) + { + translated = error_mark_node; + return; + } + + TyTy::BaseType *tyty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty); + rust_assert (ok); + + tree adt = TyTyResolveCompile::compile (ctx, tyty); + + // make the constructor + translated + = ctx->get_backend ()->constructor_expression (adt, false, {from}, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::RangeToExpr &expr) +{ + tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx); + if (to == error_mark_node) + { + translated = 
error_mark_node; + return; + } + + TyTy::BaseType *tyty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty); + rust_assert (ok); + + tree adt = TyTyResolveCompile::compile (ctx, tyty); + + // make the constructor + translated + = ctx->get_backend ()->constructor_expression (adt, false, {to}, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::RangeFullExpr &expr) +{ + TyTy::BaseType *tyty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty); + rust_assert (ok); + + tree adt = TyTyResolveCompile::compile (ctx, tyty); + translated = ctx->get_backend ()->constructor_expression (adt, false, {}, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::RangeFromToInclExpr &expr) +{ + tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx); + tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx); + if (from == error_mark_node || to == error_mark_node) + { + translated = error_mark_node; + return; + } + + TyTy::BaseType *tyty = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty); + rust_assert (ok); + + tree adt = TyTyResolveCompile::compile (ctx, tyty); + + // make the constructor + translated + = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1, + expr.get_locus ()); +} + +void +CompileExpr::visit (HIR::ArrayIndexExpr &expr) +{ + tree array_reference = CompileExpr::Compile (expr.get_array_expr (), ctx); + tree index = CompileExpr::Compile (expr.get_index_expr (), ctx); + + // this might be an core::ops::index lang item situation + TyTy::FnType *fntype; + bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload ( + expr.get_mappings ().get_hirid (), &fntype); + if (is_op_overload) + { + auto lang_item_type = Analysis::RustLangItem::ItemType::INDEX; + tree operator_overload_call + = resolve_operator_overload (lang_item_type, expr, array_reference, + index, 
expr.get_array_expr (), + expr.get_index_expr ()); + + tree actual_type = TREE_TYPE (operator_overload_call); + bool can_indirect = TYPE_PTR_P (actual_type) || TYPE_REF_P (actual_type); + if (!can_indirect) + { + // nothing to do + translated = operator_overload_call; + return; + } + + // rust deref always returns a reference from this overload then we can + // actually do the indirection + translated + = indirect_expression (operator_overload_call, expr.get_locus ()); + return; + } + + // lets check if the array is a reference type then we can add an + // indirection if required + TyTy::BaseType *array_expr_ty = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + expr.get_array_expr ()->get_mappings ().get_hirid (), &array_expr_ty); + rust_assert (ok); + + // do we need to add an indirect reference + if (array_expr_ty->get_kind () == TyTy::TypeKind::REF) + { + array_reference + = indirect_expression (array_reference, expr.get_locus ()); + } + + translated + = ctx->get_backend ()->array_index_expression (array_reference, index, + expr.get_locus ()); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-expr.h b/gcc/rust/backend/rust-compile-expr.h new file mode 100644 index 00000000000..4c1f95ade29 --- /dev/null +++ b/gcc/rust/backend/rust-compile-expr.h @@ -0,0 +1,148 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_EXPR +#define RUST_COMPILE_EXPR + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileExpr : private HIRCompileBase, protected HIR::HIRExpressionVisitor +{ +public: + static tree Compile (HIR::Expr *expr, Context *ctx); + + void visit (HIR::TupleIndexExpr &expr) override; + void visit (HIR::TupleExpr &expr) override; + void visit (HIR::ReturnExpr &expr) override; + void visit (HIR::CallExpr &expr) override; + void visit (HIR::MethodCallExpr &expr) override; + void visit (HIR::LiteralExpr &expr) override; + void visit (HIR::AssignmentExpr &expr) override; + void visit (HIR::CompoundAssignmentExpr &expr) override; + void visit (HIR::ArrayIndexExpr &expr) override; + void visit (HIR::ArrayExpr &expr) override; + void visit (HIR::ArithmeticOrLogicalExpr &expr) override; + void visit (HIR::ComparisonExpr &expr) override; + void visit (HIR::LazyBooleanExpr &expr) override; + void visit (HIR::NegationExpr &expr) override; + void visit (HIR::TypeCastExpr &expr) override; + void visit (HIR::IfExpr &expr) override; + void visit (HIR::IfExprConseqIf &expr) override; + void visit (HIR::IfExprConseqElse &expr) override; + void visit (HIR::BlockExpr &expr) override; + void visit (HIR::UnsafeBlockExpr &expr) override; + void visit (HIR::StructExprStruct &struct_expr) override; + void visit (HIR::StructExprStructFields &struct_expr) override; + void visit (HIR::GroupedExpr &expr) override; + void visit (HIR::FieldAccessExpr &expr) override; + void visit (HIR::QualifiedPathInExpression &expr) override; + void visit (HIR::PathInExpression &expr) override; + void visit (HIR::LoopExpr &expr) override; + void visit (HIR::WhileLoopExpr &expr) override; + void visit (HIR::BreakExpr &expr) override; + void visit (HIR::ContinueExpr &expr) override; + void visit (HIR::BorrowExpr &expr) override; + 
void visit (HIR::DereferenceExpr &expr) override; + void visit (HIR::MatchExpr &expr) override; + void visit (HIR::RangeFromToExpr &expr) override; + void visit (HIR::RangeFromExpr &expr) override; + void visit (HIR::RangeToExpr &expr) override; + void visit (HIR::RangeFullExpr &expr) override; + void visit (HIR::RangeFromToInclExpr &expr) override; + + // Empty visit for unused Expression HIR nodes. + void visit (HIR::ClosureExprInner &) override {} + void visit (HIR::ClosureExprInnerTyped &) override {} + void visit (HIR::StructExprFieldIdentifier &) override {} + void visit (HIR::StructExprFieldIdentifierValue &) override {} + void visit (HIR::StructExprFieldIndexValue &) override {} + void visit (HIR::ErrorPropagationExpr &) override {} + void visit (HIR::RangeToInclExpr &) override {} + void visit (HIR::WhileLetLoopExpr &) override {} + void visit (HIR::ForLoopExpr &) override {} + void visit (HIR::IfExprConseqIfLet &) override {} + void visit (HIR::IfLetExpr &) override {} + void visit (HIR::IfLetExprConseqElse &) override {} + void visit (HIR::IfLetExprConseqIf &) override {} + void visit (HIR::IfLetExprConseqIfLet &) override {} + void visit (HIR::AwaitExpr &) override {} + void visit (HIR::AsyncBlockExpr &) override {} + +protected: + tree get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn, + TyTy::BaseType *receiver, TyTy::FnType *fntype, + tree receiver_ref, Location expr_locus); + + tree get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn, + TyTy::BaseType *receiver, TyTy::FnType *fntype, + tree receiver_ref, Location expr_locus); + + tree resolve_method_address (TyTy::FnType *fntype, HirId ref, + TyTy::BaseType *receiver, + HIR::PathIdentSegment &segment, + Analysis::NodeMapping expr_mappings, + Location expr_locus); + + tree + resolve_operator_overload (Analysis::RustLangItem::ItemType lang_item_type, + HIR::OperatorExprMeta expr, tree lhs, tree rhs, + HIR::Expr *lhs_expr, HIR::Expr *rhs_expr); + + tree compile_bool_literal (const 
HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_integer_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_float_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_char_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_byte_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_string_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree compile_byte_string_literal (const HIR::LiteralExpr &expr, + const TyTy::BaseType *tyty); + + tree type_cast_expression (tree type_to_cast_to, tree expr, Location locus); + + tree array_value_expr (Location expr_locus, const TyTy::ArrayType &array_tyty, + tree array_type, HIR::ArrayElemsValues &elems); + + tree array_copied_expr (Location expr_locus, + const TyTy::ArrayType &array_tyty, tree array_type, + HIR::ArrayElemsCopied &elems); + +private: + CompileExpr (Context *ctx); + + tree translated; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_EXPR diff --git a/gcc/rust/backend/rust-compile-extern.h b/gcc/rust/backend/rust-compile-extern.h new file mode 100644 index 00000000000..45a507e03be --- /dev/null +++ b/gcc/rust/backend/rust-compile-extern.h @@ -0,0 +1,172 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_EXTERN_ITEM +#define RUST_COMPILE_EXTERN_ITEM + +#include "rust-compile-base.h" +#include "rust-compile-intrinsic.h" + +namespace Rust { +namespace Compile { + +class CompileExternItem : public HIRCompileBase, + public HIR::HIRExternalItemVisitor +{ +public: + static tree compile (HIR::ExternalItem *item, Context *ctx, + TyTy::BaseType *concrete = nullptr, + bool is_query_mode = false, + Location ref_locus = Location ()) + { + CompileExternItem compiler (ctx, concrete, ref_locus); + item->accept_vis (compiler); + + if (is_query_mode && compiler.reference == error_mark_node) + rust_internal_error_at (ref_locus, "failed to compile extern item: %s", + item->as_string ().c_str ()); + + return compiler.reference; + } + + void visit (HIR::ExternalStaticItem &item) override + { + // check if its already been compiled + Bvariable *lookup = ctx->get_backend ()->error_variable (); + if (ctx->lookup_var_decl (item.get_mappings ().get_hirid (), &lookup)) + { + reference = ctx->get_backend ()->var_expression (lookup, ref_locus); + return; + } + + TyTy::BaseType *resolved_type = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type (item.get_mappings ().get_hirid (), + &resolved_type); + rust_assert (ok); + + std::string name = item.get_item_name (); + // FIXME this is assuming C ABI + std::string asm_name = name; + + tree type = TyTyResolveCompile::compile (ctx, resolved_type); + bool is_external = true; + bool is_hidden = false; + bool in_unique_section = false; + + Bvariable *static_global + = ctx->get_backend ()->global_variable (name, asm_name, type, is_external, + is_hidden, in_unique_section, + item.get_locus ()); + ctx->insert_var_decl (item.get_mappings ().get_hirid (), static_global); + ctx->push_var (static_global); + + reference = ctx->get_backend ()->var_expression (static_global, ref_locus); + } + + void visit 
(HIR::ExternalFunctionItem &function) override + { + TyTy::BaseType *fntype_tyty; + if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (), + &fntype_tyty)) + { + rust_fatal_error (function.get_locus (), + "failed to lookup function type"); + return; + } + + rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *fntype = static_cast (fntype_tyty); + if (fntype->has_subsititions_defined ()) + { + // we can't do anything for this; only when it is used and a concrete type + // is given + if (concrete == nullptr) + return; + else + { + rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF); + fntype = static_cast (concrete); + } + } + + // items can be forward compiled which means we may not need to invoke this + // code. We might also have already compiled this generic function as well. + tree lookup = NULL_TREE; + if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup, + fntype->get_id (), fntype)) + { + reference = address_expression (lookup, ref_locus); + return; + } + + if (fntype->has_subsititions_defined ()) + { + // override the Hir Lookups for the substitutions in this context + fntype->override_context (); + } + + if (fntype->get_abi () == ABI::INTRINSIC) + { + Intrinsics compile (ctx); + tree fndecl = compile.compile (fntype); + ctx->insert_function_decl (fntype, fndecl); + return; + } + + tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype); + std::string ir_symbol_name = function.get_item_name (); + std::string asm_name = function.get_item_name (); + if (fntype->get_abi () == ABI::RUST) + { + // then we need to get the canonical path of it and mangle it + const Resolver::CanonicalPath *canonical_path = nullptr; + bool ok = ctx->get_mappings ()->lookup_canonical_path ( + function.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + ir_symbol_name = canonical_path->get () + fntype->subst_as_string (); + asm_name = ctx->mangle_item (fntype, *canonical_path); + } + + const 
unsigned int flags = Backend::function_is_declaration; + tree fndecl + = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, + asm_name, flags, function.get_locus ()); + TREE_PUBLIC (fndecl) = 1; + setup_abi_options (fndecl, fntype->get_abi ()); + + ctx->insert_function_decl (fntype, fndecl); + + reference = address_expression (fndecl, ref_locus); + } + +private: + CompileExternItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus) + : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node), + ref_locus (ref_locus) + {} + + TyTy::BaseType *concrete; + tree reference; + Location ref_locus; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_EXTERN_ITEM diff --git a/gcc/rust/backend/rust-compile-fnparam.cc b/gcc/rust/backend/rust-compile-fnparam.cc new file mode 100644 index 00000000000..3f0ec82b625 --- /dev/null +++ b/gcc/rust/backend/rust-compile-fnparam.cc @@ -0,0 +1,121 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile-fnparam.h" +#include "rust-compile-pattern.h" + +#include "gimple-expr.h" + +namespace Rust { +namespace Compile { + +CompileFnParam::CompileFnParam (Context *ctx, tree fndecl, tree decl_type, + Location locus) + : HIRCompileBase (ctx), fndecl (fndecl), decl_type (decl_type), locus (locus), + compiled_param (ctx->get_backend ()->error_variable ()) +{} + +Bvariable * +CompileFnParam::compile (Context *ctx, tree fndecl, HIR::FunctionParam *param, + tree decl_type, Location locus) +{ + CompileFnParam compiler (ctx, fndecl, decl_type, locus); + param->get_param_name ()->accept_vis (compiler); + return compiler.compiled_param; +} + +Bvariable * +CompileFnParam::compile (Context *ctx, tree fndecl, HIR::Pattern *param, + tree decl_type, Location locus) +{ + CompileFnParam compiler (ctx, fndecl, decl_type, locus); + param->accept_vis (compiler); + return compiler.compiled_param; +} + +void +CompileFnParam::visit (HIR::IdentifierPattern &pattern) +{ + if (!pattern.is_mut ()) + decl_type = ctx->get_backend ()->immutable_type (decl_type); + + compiled_param + = ctx->get_backend ()->parameter_variable (fndecl, + pattern.get_identifier (), + decl_type, locus); +} + +void +CompileFnParam::visit (HIR::WildcardPattern &pattern) +{ + decl_type = ctx->get_backend ()->immutable_type (decl_type); + + compiled_param + = ctx->get_backend ()->parameter_variable (fndecl, "_", decl_type, locus); +} + +void +CompileFnParam::visit (HIR::StructPattern &pattern) +{ + // generate the anon param + tree tmp_ident = create_tmp_var_name ("RSTPRM"); + std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident)); + + decl_type = ctx->get_backend ()->immutable_type (decl_type); + compiled_param + = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier, + decl_type, locus); + + // setup the pattern bindings + tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus); + CompilePatternBindings::Compile (&pattern, anon_param, 
ctx); +} + +void +CompileFnParam::visit (HIR::TupleStructPattern &pattern) +{ + // generate the anon param + tree tmp_ident = create_tmp_var_name ("RSTPRM"); + std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident)); + + decl_type = ctx->get_backend ()->immutable_type (decl_type); + compiled_param + = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier, + decl_type, locus); + + // setup the pattern bindings + tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus); + CompilePatternBindings::Compile (&pattern, anon_param, ctx); +} + +Bvariable * +CompileSelfParam::compile (Context *ctx, tree fndecl, HIR::SelfParam &self, + tree decl_type, Location locus) +{ + bool is_immutable + = self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM + || self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM_REF; + if (is_immutable) + decl_type = ctx->get_backend ()->immutable_type (decl_type); + + return ctx->get_backend ()->parameter_variable (fndecl, "self", decl_type, + locus); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-fnparam.h b/gcc/rust/backend/rust-compile-fnparam.h new file mode 100644 index 00000000000..0dbbd99ef08 --- /dev/null +++ b/gcc/rust/backend/rust-compile-fnparam.h @@ -0,0 +1,70 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_FNPARAM +#define RUST_COMPILE_FNPARAM + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileFnParam : private HIRCompileBase, protected HIR::HIRPatternVisitor +{ +public: + static Bvariable *compile (Context *ctx, tree fndecl, + HIR::FunctionParam *param, tree decl_type, + Location locus); + static Bvariable *compile (Context *ctx, tree fndecl, HIR::Pattern *param, + tree decl_type, Location locus); + + void visit (HIR::IdentifierPattern &pattern) override; + void visit (HIR::WildcardPattern &pattern) override; + void visit (HIR::StructPattern &) override; + void visit (HIR::TupleStructPattern &) override; + + // Empty visit for unused Pattern HIR nodes. + void visit (HIR::GroupedPattern &) override {} + void visit (HIR::LiteralPattern &) override {} + void visit (HIR::PathInExpression &) override {} + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::RangePattern &) override {} + void visit (HIR::ReferencePattern &) override {} + void visit (HIR::SlicePattern &) override {} + void visit (HIR::TuplePattern &) override {} + +private: + CompileFnParam (Context *ctx, tree fndecl, tree decl_type, Location locus); + + tree fndecl; + tree decl_type; + Location locus; + Bvariable *compiled_param; +}; + +class CompileSelfParam : private HIRCompileBase +{ +public: + static Bvariable *compile (Context *ctx, tree fndecl, HIR::SelfParam &self, + tree decl_type, Location locus); +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_FNPARAM diff --git a/gcc/rust/backend/rust-compile-implitem.cc b/gcc/rust/backend/rust-compile-implitem.cc new file mode 100644 index 00000000000..d0f70a70228 --- /dev/null +++ b/gcc/rust/backend/rust-compile-implitem.cc @@ -0,0 +1,101 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. 
+ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-implitem.h" +#include "rust-compile-expr.h" +#include "rust-compile-fnparam.h" + +namespace Rust { +namespace Compile { + +void +CompileTraitItem::visit (HIR::TraitItemConst &constant) +{ + rust_assert (concrete != nullptr); + TyTy::BaseType *resolved_type = concrete; + + const Resolver::CanonicalPath *canonical_path = nullptr; + bool ok = ctx->get_mappings ()->lookup_canonical_path ( + constant.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + HIR::Expr *const_value_expr = constant.get_expr ().get (); + tree const_expr + = compile_constant_item (ctx, resolved_type, canonical_path, + const_value_expr, constant.get_locus ()); + ctx->push_const (const_expr); + ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr); + + reference = const_expr; +} + +void +CompileTraitItem::visit (HIR::TraitItemFunc &func) +{ + rust_assert (func.has_block_defined ()); + + rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *fntype = static_cast (concrete); + fntype->monomorphize (); + + // items can be forward compiled which means we may not need to invoke this + // code. We might also have already compiled this generic function as well. 
+ tree lookup = NULL_TREE; + if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup, + fntype->get_id (), fntype)) + { + // has this been added to the list then it must be finished + if (ctx->function_completed (lookup)) + { + tree dummy = NULL_TREE; + if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy)) + { + ctx->insert_function_decl (fntype, lookup); + } + + reference = address_expression (lookup, ref_locus); + return; + } + } + + if (fntype->has_subsititions_defined ()) + { + // override the Hir Lookups for the substituions in this context + fntype->override_context (); + } + + const Resolver::CanonicalPath *canonical_path = nullptr; + bool ok = ctx->get_mappings ()->lookup_canonical_path ( + func.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + // FIXME: How do we get the proper visibility here? + auto vis = HIR::Visibility (HIR::Visibility::VisType::PUBLIC); + HIR::TraitFunctionDecl &function = func.get_decl (); + tree fndecl + = compile_function (ctx, function.get_function_name (), + function.get_self (), function.get_function_params (), + function.get_qualifiers (), vis, + func.get_outer_attrs (), func.get_locus (), + func.get_block_expr ().get (), canonical_path, fntype, + function.has_return_type ()); + reference = address_expression (fndecl, ref_locus); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-implitem.h b/gcc/rust/backend/rust-compile-implitem.h new file mode 100644 index 00000000000..ac9478af150 --- /dev/null +++ b/gcc/rust/backend/rust-compile-implitem.h @@ -0,0 +1,91 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. 
// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.

// You should have received a copy of the GNU General Public License
// along with GCC; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

#ifndef RUST_COMPILE_IMPLITEM_H
#define RUST_COMPILE_IMPLITEM_H

#include "rust-compile-item.h"
#include "rust-compile-expr.h"
#include "rust-compile-fnparam.h"

namespace Rust {
namespace Compile {

// this is a proxy for HIR::ImplItem's back to use the normal HIR::Item path
class CompileInherentImplItem : public CompileItem
{
public:
  // Compile ITEM; returns the reference tree, or error_mark_node on failure
  // (an internal compiler error when IS_QUERY_MODE is set).
  static tree Compile (HIR::ImplItem *item, Context *ctx,
		       TyTy::BaseType *concrete = nullptr,
		       bool is_query_mode = false,
		       Location ref_locus = Location ())
  {
    CompileInherentImplItem compiler (ctx, concrete, ref_locus);
    item->accept_vis (compiler);

    if (is_query_mode && compiler.reference == error_mark_node)
      rust_internal_error_at (ref_locus, "failed to compile impl item: %s",
			      item->as_string ().c_str ());

    return compiler.reference;
  }

private:
  CompileInherentImplItem (Context *ctx, TyTy::BaseType *concrete,
			   Location ref_locus)
    : CompileItem (ctx, concrete, ref_locus)
  {}
};

// Compiles items provided by a trait (default methods, associated consts).
class CompileTraitItem : public HIRCompileBase, public HIR::HIRTraitItemVisitor
{
public:
  // Compile ITEM with the monomorphized type CONCRETE; same error contract
  // as CompileInherentImplItem::Compile above.
  static tree Compile (HIR::TraitItem *item, Context *ctx,
		       TyTy::BaseType *concrete, bool is_query_mode = false,
		       Location ref_locus = Location ())
  {
    CompileTraitItem compiler (ctx, concrete, ref_locus);
    item->accept_vis (compiler);

    if (is_query_mode && compiler.reference == error_mark_node)
      rust_internal_error_at (ref_locus, "failed to compile trait item: %s",
			      item->as_string ().c_str ());

    return compiler.reference;
  }

  void visit (HIR::TraitItemConst &constant) override;
  void visit (HIR::TraitItemFunc &func) override;

  // Associated types have no GENERIC representation of their own.
  void visit (HIR::TraitItemType &typ) override {}

private:
  CompileTraitItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
    : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
      ref_locus (ref_locus)
  {}

  TyTy::BaseType *concrete;  // monomorphized type for this use
  tree reference;	     // compilation result handed back by Compile ()
  Location ref_locus;	     // location of the reference requesting the item
};

} // namespace Compile
} // namespace Rust

#endif // RUST_COMPILE_IMPLITEM_H

// This file is part of GCC.

// GCC is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 3, or (at your option) any later
// version.

// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.

// You should have received a copy of the GNU General Public License
// along with GCC; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.
+ +#include "rust-compile-intrinsic.h" +#include "fold-const.h" +#include "langhooks.h" +#include "rust-compile-context.h" +#include "rust-compile-type.h" +#include "rust-compile-fnparam.h" +#include "rust-builtins.h" +#include "rust-diagnostics.h" +#include "rust-location.h" +#include "rust-tree.h" +#include "tree-core.h" +#include "print-tree.h" + +namespace Rust { +namespace Compile { + +static tree +offset_handler (Context *ctx, TyTy::FnType *fntype); +static tree +sizeof_handler (Context *ctx, TyTy::FnType *fntype); +static tree +transmute_handler (Context *ctx, TyTy::FnType *fntype); +static tree +rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op); +static tree +wrapping_op_handler (Context *ctx, TyTy::FnType *fntype, tree_code op); +static tree +copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype); + +static inline tree +rotate_left_handler (Context *ctx, TyTy::FnType *fntype) +{ + return rotate_handler (ctx, fntype, LROTATE_EXPR); +} +static inline tree +rotate_right_handler (Context *ctx, TyTy::FnType *fntype) +{ + return rotate_handler (ctx, fntype, RROTATE_EXPR); +} + +static inline tree +wrapping_add_handler (Context *ctx, TyTy::FnType *fntype) +{ + return wrapping_op_handler (ctx, fntype, PLUS_EXPR); +} +static inline tree +wrapping_sub_handler (Context *ctx, TyTy::FnType *fntype) +{ + return wrapping_op_handler (ctx, fntype, MINUS_EXPR); +} +static inline tree +wrapping_mul_handler (Context *ctx, TyTy::FnType *fntype) +{ + return wrapping_op_handler (ctx, fntype, MULT_EXPR); +} + +static const std::map> + generic_intrinsics = {{"offset", &offset_handler}, + {"size_of", &sizeof_handler}, + {"transmute", &transmute_handler}, + {"rotate_left", &rotate_left_handler}, + {"rotate_right", &rotate_right_handler}, + {"wrapping_add", &wrapping_add_handler}, + {"wrapping_sub", &wrapping_sub_handler}, + {"wrapping_mul", &wrapping_mul_handler}, + {"copy_nonoverlapping", ©_nonoverlapping_handler}}; + +Intrinsics::Intrinsics (Context *ctx) 
: ctx (ctx) {} + +tree +Intrinsics::compile (TyTy::FnType *fntype) +{ + rust_assert (fntype->get_abi () == ABI::INTRINSIC); + + tree builtin = error_mark_node; + BuiltinsContext &builtin_ctx = BuiltinsContext::get (); + if (builtin_ctx.lookup_simple_builtin (fntype->get_identifier (), &builtin)) + return builtin; + + // is it an generic builtin? + auto it = generic_intrinsics.find (fntype->get_identifier ()); + if (it != generic_intrinsics.end ()) + return it->second (ctx, fntype); + + Location locus = ctx->get_mappings ()->lookup_location (fntype->get_ref ()); + rust_error_at (locus, "unknown builtin intrinsic: %s", + fntype->get_identifier ().c_str ()); + + return error_mark_node; +} + +/** + * Items can be forward compiled which means we may not need to invoke this + * code. We might also have already compiled this generic function as well. + */ +static bool +check_for_cached_intrinsic (Context *ctx, TyTy::FnType *fntype, tree *lookup) +{ + if (ctx->lookup_function_decl (fntype->get_ty_ref (), lookup, + fntype->get_id (), fntype)) + { + // Has this been added to the list? 
Then it must be finished + if (ctx->function_completed (*lookup)) + { + tree dummy = NULL_TREE; + if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy)) + ctx->insert_function_decl (fntype, *lookup); + return true; + } + } + + return false; +} + +/** + * Maybe override the Hir Lookups for the substituions in this context + */ +static void +maybe_override_ctx (TyTy::FnType *fntype) +{ + if (fntype->has_subsititions_defined ()) + fntype->override_context (); +} + +/** + * Compile and setup a function's parameters + */ +static void +compile_fn_params (Context *ctx, TyTy::FnType *fntype, tree fndecl, + std::vector *compiled_param_variables, + std::vector *compiled_param_types = nullptr) +{ + for (auto &parm : fntype->get_params ()) + { + auto &referenced_param = parm.first; + auto ¶m_tyty = parm.second; + auto compiled_param_type = TyTyResolveCompile::compile (ctx, param_tyty); + + Location param_locus = referenced_param->get_locus (); + Bvariable *compiled_param_var + = CompileFnParam::compile (ctx, fndecl, referenced_param, + compiled_param_type, param_locus); + + compiled_param_variables->push_back (compiled_param_var); + if (compiled_param_types) + compiled_param_types->push_back (compiled_param_type); + } +} + +static tree +compile_intrinsic_function (Context *ctx, TyTy::FnType *fntype) +{ + maybe_override_ctx (fntype); + + const Resolver::CanonicalPath &canonical_path = fntype->get_ident ().path; + + tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype); + std::string ir_symbol_name + = canonical_path.get () + fntype->subst_as_string (); + std::string asm_name = ctx->mangle_item (fntype, canonical_path); + + unsigned int flags = 0; + tree fndecl + = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, asm_name, + flags, fntype->get_ident ().locus); + + TREE_PUBLIC (fndecl) = 0; + TREE_READONLY (fndecl) = 1; + DECL_ARTIFICIAL (fndecl) = 1; + DECL_EXTERNAL (fndecl) = 0; + DECL_DECLARED_INLINE_P (fndecl) = 1; + + return fndecl; +} + 
+static void +enter_intrinsic_block (Context *ctx, tree fndecl) +{ + tree enclosing_scope = NULL_TREE; + Location start_location = Location (); + Location end_location = Location (); + + auto block = ctx->get_backend ()->block (fndecl, enclosing_scope, {}, + start_location, end_location); + + ctx->push_block (block); +} + +static void +finalize_intrinsic_block (Context *ctx, tree fndecl) +{ + tree bind_tree = ctx->pop_block (); + + gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR); + + DECL_SAVED_TREE (fndecl) = bind_tree; + + ctx->push_function (fndecl); +} + +static tree +offset_handler (Context *ctx, TyTy::FnType *fntype) +{ + // offset intrinsic has two params dst pointer and offset isize + rust_assert (fntype->get_params ().size () == 2); + + auto fndecl = compile_intrinsic_function (ctx, fntype); + + std::vector param_vars; + compile_fn_params (ctx, fntype, fndecl, ¶m_vars); + + auto &dst_param = param_vars.at (0); + auto &size_param = param_vars.at (1); + rust_assert (param_vars.size () == 2); + if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN offset FN BODY BEGIN + tree dst = ctx->get_backend ()->var_expression (dst_param, Location ()); + tree size = ctx->get_backend ()->var_expression (size_param, Location ()); + tree pointer_offset_expr + = pointer_offset_expression (dst, size, BUILTINS_LOCATION); + auto return_statement + = ctx->get_backend ()->return_statement (fndecl, {pointer_offset_expr}, + Location ()); + ctx->add_statement (return_statement); + // BUILTIN offset FN BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +static tree +sizeof_handler (Context *ctx, TyTy::FnType *fntype) +{ + // size_of has _zero_ parameters its parameter is the generic one + rust_assert (fntype->get_params ().size () == 0); + + tree lookup = NULL_TREE; + if (check_for_cached_intrinsic (ctx, fntype, &lookup)) + return lookup; + + auto fndecl = 
compile_intrinsic_function (ctx, fntype); + + // get the template parameter type tree fn size_of(); + rust_assert (fntype->get_num_substitutions () == 1); + auto ¶m_mapping = fntype->get_substs ().at (0); + const TyTy::ParamType *param_tyty = param_mapping.get_param_ty (); + TyTy::BaseType *resolved_tyty = param_tyty->resolve (); + tree template_parameter_type + = TyTyResolveCompile::compile (ctx, resolved_tyty); + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN size_of FN BODY BEGIN + tree size_expr = TYPE_SIZE_UNIT (template_parameter_type); + auto return_statement + = ctx->get_backend ()->return_statement (fndecl, {size_expr}, Location ()); + ctx->add_statement (return_statement); + // BUILTIN size_of FN BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +static tree +transmute_handler (Context *ctx, TyTy::FnType *fntype) +{ + // transmute intrinsic has one parameter + rust_assert (fntype->get_params ().size () == 1); + + tree lookup = NULL_TREE; + if (check_for_cached_intrinsic (ctx, fntype, &lookup)) + return lookup; + + auto fndecl = compile_intrinsic_function (ctx, fntype); + + std::vector param_vars; + std::vector compiled_types; + compile_fn_params (ctx, fntype, fndecl, ¶m_vars, &compiled_types); + + if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + // param to convert + Bvariable *convert_me_param = param_vars.at (0); + tree convert_me_expr + = ctx->get_backend ()->var_expression (convert_me_param, Location ()); + + // check for transmute pre-conditions + tree target_type_expr = TREE_TYPE (DECL_RESULT (fndecl)); + tree source_type_expr = compiled_types.at (0); + tree target_size_expr = TYPE_SIZE (target_type_expr); + tree source_size_expr = TYPE_SIZE (source_type_expr); + // for some reason, unit types and other zero-sized types return NULL for the + // size expressions + unsigned HOST_WIDE_INT target_size + = target_size_expr ? 
TREE_INT_CST_LOW (target_size_expr) : 0; + unsigned HOST_WIDE_INT source_size + = source_size_expr ? TREE_INT_CST_LOW (source_size_expr) : 0; + + // size check for concrete types + // TODO(liushuyu): check alignment for pointers; check for dependently-sized + // types + if (target_size != source_size) + { + rust_error_at (fntype->get_locus (), + "cannot transmute between types of different sizes, or " + "dependently-sized types"); + rust_inform (fntype->get_ident ().locus, "source type: %qs (%lu bits)", + fntype->get_params ().at (0).second->as_string ().c_str (), + (unsigned long) source_size); + rust_inform (fntype->get_ident ().locus, "target type: %qs (%lu bits)", + fntype->get_return_type ()->as_string ().c_str (), + (unsigned long) target_size); + } + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN transmute FN BODY BEGIN + + // Return *((orig_type*)&decl) */ + + tree t + = build_fold_addr_expr_loc (Location ().gcc_location (), convert_me_expr); + t = fold_build1_loc (Location ().gcc_location (), NOP_EXPR, + build_pointer_type (target_type_expr), t); + tree result_expr + = build_fold_indirect_ref_loc (Location ().gcc_location (), t); + + auto return_statement + = ctx->get_backend ()->return_statement (fndecl, {result_expr}, + Location ()); + ctx->add_statement (return_statement); + // BUILTIN transmute FN BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +static tree +rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op) +{ + // rotate intrinsic has two parameter + rust_assert (fntype->get_params ().size () == 2); + + tree lookup = NULL_TREE; + if (check_for_cached_intrinsic (ctx, fntype, &lookup)) + return lookup; + + auto fndecl = compile_intrinsic_function (ctx, fntype); + + // setup the params + std::vector param_vars; + compile_fn_params (ctx, fntype, fndecl, ¶m_vars); + + auto &x_param = param_vars.at (0); + auto &y_param = param_vars.at (1); + rust_assert (param_vars.size () == 2); + if (!ctx->get_backend 
()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN rotate FN BODY BEGIN + tree x = ctx->get_backend ()->var_expression (x_param, Location ()); + tree y = ctx->get_backend ()->var_expression (y_param, Location ()); + tree rotate_expr + = fold_build2_loc (BUILTINS_LOCATION, op, TREE_TYPE (x), x, y); + auto return_statement + = ctx->get_backend ()->return_statement (fndecl, {rotate_expr}, + Location ()); + ctx->add_statement (return_statement); + // BUILTIN rotate FN BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +/** + * pub fn wrapping_{add, sub, mul}(lhs: T, rhs: T) -> T; + */ +static tree +wrapping_op_handler (Context *ctx, TyTy::FnType *fntype, tree_code op) +{ + // wrapping_ intrinsics have two parameter + rust_assert (fntype->get_params ().size () == 2); + + tree lookup = NULL_TREE; + if (check_for_cached_intrinsic (ctx, fntype, &lookup)) + return lookup; + + auto fndecl = compile_intrinsic_function (ctx, fntype); + + // setup the params + std::vector param_vars; + compile_fn_params (ctx, fntype, fndecl, ¶m_vars); + + auto &lhs_param = param_vars.at (0); + auto &rhs_param = param_vars.at (1); + + if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN wrapping_ FN BODY BEGIN + auto lhs = ctx->get_backend ()->var_expression (lhs_param, Location ()); + auto rhs = ctx->get_backend ()->var_expression (rhs_param, Location ()); + + // Operations are always wrapping in Rust, as we have -fwrapv enabled by + // default. The difference between a wrapping_{add, sub, mul} and a regular + // arithmetic operation is that these intrinsics do not panic - they always + // carry over. 
+ auto wrap_expr = build2 (op, TREE_TYPE (lhs), lhs, rhs); + + auto return_statement + = ctx->get_backend ()->return_statement (fndecl, {wrap_expr}, Location ()); + ctx->add_statement (return_statement); + // BUILTIN wrapping_ FN BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +/** + * fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); + */ +static tree +copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype) +{ + rust_assert (fntype->get_params ().size () == 3); + rust_assert (fntype->get_num_substitutions () == 1); + + tree lookup = NULL_TREE; + if (check_for_cached_intrinsic (ctx, fntype, &lookup)) + return lookup; + + auto fndecl = compile_intrinsic_function (ctx, fntype); + + // Most intrinsic functions are pure - not `copy_nonoverlapping` + TREE_READONLY (fndecl) = 0; + TREE_SIDE_EFFECTS (fndecl) = 1; + + // setup the params + std::vector param_vars; + compile_fn_params (ctx, fntype, fndecl, ¶m_vars); + + if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars)) + return error_mark_node; + + enter_intrinsic_block (ctx, fndecl); + + // BUILTIN copy_nonoverlapping BODY BEGIN + + auto src = ctx->get_backend ()->var_expression (param_vars[0], Location ()); + auto dst = ctx->get_backend ()->var_expression (param_vars[1], Location ()); + auto count = ctx->get_backend ()->var_expression (param_vars[2], Location ()); + + // We want to create the following statement + // memcpy(dst, src, size_of::()); + // so + // memcpy(dst, src, size_expr); + + auto *resolved_ty = fntype->get_substs ().at (0).get_param_ty ()->resolve (); + auto param_type = TyTyResolveCompile::compile (ctx, resolved_ty); + + tree size_expr + = build2 (MULT_EXPR, size_type_node, TYPE_SIZE_UNIT (param_type), count); + + tree memcpy_raw = nullptr; + BuiltinsContext::get ().lookup_simple_builtin ("memcpy", &memcpy_raw); + rust_assert (memcpy_raw); + auto memcpy + = build_fold_addr_expr_loc (Location ().gcc_location (), memcpy_raw); + + 
auto copy_call + = ctx->get_backend ()->call_expression (memcpy, {dst, src, size_expr}, + nullptr, Location ()); + + ctx->add_statement (copy_call); + + // BUILTIN copy_nonoverlapping BODY END + + finalize_intrinsic_block (ctx, fndecl); + + return fndecl; +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-intrinsic.h b/gcc/rust/backend/rust-compile-intrinsic.h new file mode 100644 index 00000000000..dceb0864fd4 --- /dev/null +++ b/gcc/rust/backend/rust-compile-intrinsic.h @@ -0,0 +1,40 @@ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_INTRINSIC +#define RUST_COMPILE_INTRINSIC + +#include "rust-compile-context.h" +#include "langhooks.h" + +namespace Rust { +namespace Compile { + +class Intrinsics +{ +public: + Intrinsics (Context *ctx); + + tree compile (TyTy::FnType *fntype); + +private: + Context *ctx; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_INTRINSIC diff --git a/gcc/rust/backend/rust-compile-item.cc b/gcc/rust/backend/rust-compile-item.cc new file mode 100644 index 00000000000..ceba51c2d27 --- /dev/null +++ b/gcc/rust/backend/rust-compile-item.cc @@ -0,0 +1,206 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. 
+ +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-item.h" +#include "rust-compile-implitem.h" +#include "rust-compile-expr.h" +#include "rust-compile-extern.h" +#include "rust-constexpr.h" + +namespace Rust { +namespace Compile { + +void +CompileItem::visit (HIR::StaticItem &var) +{ + // have we already compiled this? + Bvariable *static_decl_ref = nullptr; + if (ctx->lookup_var_decl (var.get_mappings ().get_hirid (), &static_decl_ref)) + { + reference + = ctx->get_backend ()->var_expression (static_decl_ref, ref_locus); + return; + } + + TyTy::BaseType *resolved_type = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type (var.get_mappings ().get_hirid (), + &resolved_type); + rust_assert (ok); + + tree type = TyTyResolveCompile::compile (ctx, resolved_type); + tree value = CompileExpr::Compile (var.get_expr (), ctx); + + const Resolver::CanonicalPath *canonical_path = nullptr; + ok = ctx->get_mappings ()->lookup_canonical_path ( + var.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + std::string name = canonical_path->get (); + std::string asm_name = ctx->mangle_item (resolved_type, *canonical_path); + + bool is_external = false; + bool is_hidden = false; + bool in_unique_section = true; + + Bvariable *static_global + = ctx->get_backend ()->global_variable (name, asm_name, type, is_external, + is_hidden, in_unique_section, + var.get_locus ()); + ctx->get_backend 
()->global_variable_set_init (static_global, value); + + ctx->insert_var_decl (var.get_mappings ().get_hirid (), static_global); + ctx->push_var (static_global); + + reference = ctx->get_backend ()->var_expression (static_global, ref_locus); +} + +void +CompileItem::visit (HIR::ConstantItem &constant) +{ + if (ctx->lookup_const_decl (constant.get_mappings ().get_hirid (), + &reference)) + return; + + // resolve the type + TyTy::BaseType *resolved_type = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (constant.get_mappings ().get_hirid (), + &resolved_type); + rust_assert (ok); + + // canonical path + const Resolver::CanonicalPath *canonical_path = nullptr; + ok = ctx->get_mappings ()->lookup_canonical_path ( + constant.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + HIR::Expr *const_value_expr = constant.get_expr (); + ctx->push_const_context (); + tree const_expr + = compile_constant_item (ctx, resolved_type, canonical_path, + const_value_expr, constant.get_locus ()); + ctx->pop_const_context (); + + ctx->push_const (const_expr); + ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr); + reference = const_expr; +} + +void +CompileItem::visit (HIR::Function &function) +{ + TyTy::BaseType *fntype_tyty; + if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (), + &fntype_tyty)) + { + rust_fatal_error (function.get_locus (), + "failed to lookup function type"); + return; + } + + rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *fntype = static_cast (fntype_tyty); + if (fntype->has_subsititions_defined ()) + { + // we cant do anything for this only when it is used and a concrete type + // is given + if (concrete == nullptr) + return; + else + { + rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF); + fntype = static_cast (concrete); + fntype->monomorphize (); + } + } + + // items can be forward compiled which means we may not need to invoke this + // code. 
We might also have already compiled this generic function as well. + tree lookup = NULL_TREE; + if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup, + fntype->get_id (), fntype)) + { + // has this been added to the list then it must be finished + if (ctx->function_completed (lookup)) + { + tree dummy = NULL_TREE; + if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy)) + { + ctx->insert_function_decl (fntype, lookup); + } + + reference = address_expression (lookup, ref_locus); + return; + } + } + + if (fntype->has_subsititions_defined ()) + { + // override the Hir Lookups for the substituions in this context + fntype->override_context (); + } + + const Resolver::CanonicalPath *canonical_path = nullptr; + bool ok = ctx->get_mappings ()->lookup_canonical_path ( + function.get_mappings ().get_nodeid (), &canonical_path); + rust_assert (ok); + + tree fndecl + = compile_function (ctx, function.get_function_name (), + function.get_self_param (), + function.get_function_params (), + function.get_qualifiers (), function.get_visibility (), + function.get_outer_attrs (), function.get_locus (), + function.get_definition ().get (), canonical_path, + fntype, function.has_function_return_type ()); + reference = address_expression (fndecl, ref_locus); +} + +void +CompileItem::visit (HIR::ImplBlock &impl_block) +{ + TyTy::BaseType *self_lookup = nullptr; + if (!ctx->get_tyctx ()->lookup_type ( + impl_block.get_type ()->get_mappings ().get_hirid (), &self_lookup)) + { + rust_error_at (impl_block.get_locus (), "failed to resolve type of impl"); + return; + } + + for (auto &impl_item : impl_block.get_impl_items ()) + CompileInherentImplItem::Compile (impl_item.get (), ctx); +} + +void +CompileItem::visit (HIR::ExternBlock &extern_block) +{ + for (auto &item : extern_block.get_extern_items ()) + { + CompileExternItem::compile (item.get (), ctx, concrete); + } +} + +void +CompileItem::visit (HIR::Module &module) +{ + for (auto &item : module.get_items ()) + 
CompileItem::compile (item.get (), ctx); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-item.h b/gcc/rust/backend/rust-compile-item.h new file mode 100644 index 00000000000..3c12f1040fc --- /dev/null +++ b/gcc/rust/backend/rust-compile-item.h @@ -0,0 +1,88 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_ITEM +#define RUST_COMPILE_ITEM + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileItem : private HIRCompileBase, protected HIR::HIRStmtVisitor +{ +protected: +public: + static tree compile (HIR::Item *item, Context *ctx, + TyTy::BaseType *concrete = nullptr, + bool is_query_mode = false, + Location ref_locus = Location ()) + { + CompileItem compiler (ctx, concrete, ref_locus); + item->accept_vis (compiler); + + if (is_query_mode && compiler.reference == error_mark_node) + rust_internal_error_at (ref_locus, "failed to compile item: %s", + item->as_string ().c_str ()); + + return compiler.reference; + } + + void visit (HIR::StaticItem &var) override; + void visit (HIR::ConstantItem &constant) override; + void visit (HIR::Function &function) override; + void visit (HIR::ImplBlock &impl_block) override; + void visit (HIR::ExternBlock &extern_block) override; + void visit (HIR::Module &module) override; + + // Empty visit for unused Stmt HIR nodes. 
+ void visit (HIR::TupleStruct &) override {} + void visit (HIR::EnumItem &) override {} + void visit (HIR::EnumItemTuple &) override {} + void visit (HIR::EnumItemStruct &) override {} + void visit (HIR::EnumItemDiscriminant &) override {} + void visit (HIR::TypePathSegmentFunction &) override {} + void visit (HIR::TypePath &) override {} + void visit (HIR::QualifiedPathInType &) override {} + void visit (HIR::ExternCrate &) override {} + void visit (HIR::UseDeclaration &) override {} + void visit (HIR::TypeAlias &) override {} + void visit (HIR::StructStruct &) override {} + void visit (HIR::Enum &) override {} + void visit (HIR::Union &) override {} + void visit (HIR::Trait &) override {} + void visit (HIR::EmptyStmt &) override {} + void visit (HIR::LetStmt &) override {} + void visit (HIR::ExprStmtWithoutBlock &) override {} + void visit (HIR::ExprStmtWithBlock &) override {} + +protected: + CompileItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus) + : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node), + ref_locus (ref_locus) + {} + + TyTy::BaseType *concrete; + tree reference; + Location ref_locus; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_ITEM diff --git a/gcc/rust/backend/rust-compile-pattern.cc b/gcc/rust/backend/rust-compile-pattern.cc new file mode 100644 index 00000000000..1d8eda1a577 --- /dev/null +++ b/gcc/rust/backend/rust-compile-pattern.cc @@ -0,0 +1,333 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-pattern.h" +#include "rust-compile-expr.h" +#include "rust-compile-resolve-path.h" +#include "rust-constexpr.h" + +namespace Rust { +namespace Compile { + +void +CompilePatternCaseLabelExpr::visit (HIR::PathInExpression &pattern) +{ + // lookup the type + TyTy::BaseType *lookup = nullptr; + bool ok + = ctx->get_tyctx ()->lookup_type (pattern.get_mappings ().get_hirid (), + &lookup); + rust_assert (ok); + + // this must be an enum + rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT); + TyTy::ADTType *adt = static_cast (lookup); + rust_assert (adt->is_enum ()); + + // lookup the variant + HirId variant_id; + ok = ctx->get_tyctx ()->lookup_variant_definition ( + pattern.get_mappings ().get_hirid (), &variant_id); + rust_assert (ok); + + TyTy::VariantDef *variant = nullptr; + ok = adt->lookup_variant_by_id (variant_id, &variant); + rust_assert (ok); + + HIR::Expr *discrim_expr = variant->get_discriminant (); + tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx); + tree folded_discrim_expr = fold_expr (discrim_expr_node); + tree case_low = folded_discrim_expr; + + case_label_expr + = build_case_label (case_low, NULL_TREE, associated_case_label); +} + +void +CompilePatternCaseLabelExpr::visit (HIR::StructPattern &pattern) +{ + CompilePatternCaseLabelExpr::visit (pattern.get_path ()); +} + +void +CompilePatternCaseLabelExpr::visit (HIR::TupleStructPattern &pattern) +{ + CompilePatternCaseLabelExpr::visit (pattern.get_path ()); +} + +void +CompilePatternCaseLabelExpr::visit (HIR::WildcardPattern &pattern) +{ + // operand 0 being NULL_TREE signifies this is the default case label see: + // tree.def for documentation for CASE_LABEL_EXPR + case_label_expr + = build_case_label (NULL_TREE, NULL_TREE, associated_case_label); +} + +void 
+CompilePatternCaseLabelExpr::visit (HIR::LiteralPattern &pattern) +{ + // Compile the literal + HIR::LiteralExpr *litexpr + = new HIR::LiteralExpr (pattern.get_pattern_mappings (), + pattern.get_literal (), pattern.get_locus (), + std::vector ()); + + // Note: Floating point literals are currently accepted but will likely be + // forbidden in LiteralPatterns in a future version of Rust. + // See: https://github.com/rust-lang/rust/issues/41620 + // For now, we cannot compile them anyway as CASE_LABEL_EXPR does not support + // floating point types. + if (pattern.get_literal ().get_lit_type () == HIR::Literal::LitType::FLOAT) + { + rust_sorry_at (pattern.get_locus (), "floating-point literal in pattern"); + } + + tree lit = CompileExpr::Compile (litexpr, ctx); + + case_label_expr = build_case_label (lit, NULL_TREE, associated_case_label); +} + +static tree +compile_range_pattern_bound (HIR::RangePatternBound *bound, + Analysis::NodeMapping mappings, Location locus, + Context *ctx) +{ + tree result = NULL_TREE; + switch (bound->get_bound_type ()) + { + case HIR::RangePatternBound::RangePatternBoundType::LITERAL: { + HIR::RangePatternBoundLiteral &ref + = *static_cast (bound); + + HIR::LiteralExpr *litexpr + = new HIR::LiteralExpr (mappings, ref.get_literal (), locus, + std::vector ()); + + result = CompileExpr::Compile (litexpr, ctx); + } + break; + + case HIR::RangePatternBound::RangePatternBoundType::PATH: { + HIR::RangePatternBoundPath &ref + = *static_cast (bound); + + result = ResolvePathRef::Compile (ref.get_path (), ctx); + + // If the path resolves to a const expression, fold it. + result = fold_expr (result); + } + break; + + case HIR::RangePatternBound::RangePatternBoundType::QUALPATH: { + HIR::RangePatternBoundQualPath &ref + = *static_cast (bound); + + result = ResolvePathRef::Compile (ref.get_qualified_path (), ctx); + + // If the path resolves to a const expression, fold it. 
+ result = fold_expr (result); + } + } + + return result; +} + +void +CompilePatternCaseLabelExpr::visit (HIR::RangePattern &pattern) +{ + tree upper = compile_range_pattern_bound (pattern.get_upper_bound ().get (), + pattern.get_pattern_mappings (), + pattern.get_locus (), ctx); + tree lower = compile_range_pattern_bound (pattern.get_lower_bound ().get (), + pattern.get_pattern_mappings (), + pattern.get_locus (), ctx); + + case_label_expr = build_case_label (lower, upper, associated_case_label); +} + +// setup the bindings + +void +CompilePatternBindings::visit (HIR::TupleStructPattern &pattern) +{ + // lookup the type + TyTy::BaseType *lookup = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + pattern.get_path ().get_mappings ().get_hirid (), &lookup); + rust_assert (ok); + + // this must be an enum + rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT); + TyTy::ADTType *adt = static_cast (lookup); + rust_assert (adt->number_of_variants () > 0); + + int variant_index = 0; + TyTy::VariantDef *variant = adt->get_variants ().at (0); + if (adt->is_enum ()) + { + HirId variant_id = UNKNOWN_HIRID; + bool ok = ctx->get_tyctx ()->lookup_variant_definition ( + pattern.get_path ().get_mappings ().get_hirid (), &variant_id); + rust_assert (ok); + + ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index); + rust_assert (ok); + } + + rust_assert (variant->get_variant_type () + == TyTy::VariantDef::VariantType::TUPLE); + + std::unique_ptr &items = pattern.get_items (); + switch (items->get_item_type ()) + { + case HIR::TupleStructItems::RANGE: { + // TODO + gcc_unreachable (); + } + break; + + case HIR::TupleStructItems::NO_RANGE: { + HIR::TupleStructItemsNoRange &items_no_range + = static_cast (*items.get ()); + + rust_assert (items_no_range.get_patterns ().size () + == variant->num_fields ()); + + if (adt->is_enum ()) + { + // we are offsetting by + 1 here since the first field in the record + // is always the discriminator + size_t tuple_field_index 
= 1; + for (auto &pattern : items_no_range.get_patterns ()) + { + tree variant_accessor + = ctx->get_backend ()->struct_field_expression ( + match_scrutinee_expr, variant_index, pattern->get_locus ()); + + tree binding = ctx->get_backend ()->struct_field_expression ( + variant_accessor, tuple_field_index++, pattern->get_locus ()); + + ctx->insert_pattern_binding ( + pattern->get_pattern_mappings ().get_hirid (), binding); + } + } + else + { + size_t tuple_field_index = 0; + for (auto &pattern : items_no_range.get_patterns ()) + { + tree variant_accessor = match_scrutinee_expr; + + tree binding = ctx->get_backend ()->struct_field_expression ( + variant_accessor, tuple_field_index++, pattern->get_locus ()); + + ctx->insert_pattern_binding ( + pattern->get_pattern_mappings ().get_hirid (), binding); + } + } + } + break; + } +} + +void +CompilePatternBindings::visit (HIR::StructPattern &pattern) +{ + // lookup the type + TyTy::BaseType *lookup = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + pattern.get_path ().get_mappings ().get_hirid (), &lookup); + rust_assert (ok); + + // this must be an enum + rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT); + TyTy::ADTType *adt = static_cast (lookup); + rust_assert (adt->number_of_variants () > 0); + + int variant_index = 0; + TyTy::VariantDef *variant = adt->get_variants ().at (0); + if (adt->is_enum ()) + { + HirId variant_id = UNKNOWN_HIRID; + bool ok = ctx->get_tyctx ()->lookup_variant_definition ( + pattern.get_path ().get_mappings ().get_hirid (), &variant_id); + rust_assert (ok); + + ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index); + rust_assert (ok); + } + + rust_assert (variant->get_variant_type () + == TyTy::VariantDef::VariantType::STRUCT); + + auto &struct_pattern_elems = pattern.get_struct_pattern_elems (); + for (auto &field : struct_pattern_elems.get_struct_pattern_fields ()) + { + switch (field->get_item_type ()) + { + case HIR::StructPatternField::ItemType::TUPLE_PAT: { + 
// TODO + gcc_unreachable (); + } + break; + + case HIR::StructPatternField::ItemType::IDENT_PAT: { + // TODO + gcc_unreachable (); + } + break; + + case HIR::StructPatternField::ItemType::IDENT: { + HIR::StructPatternFieldIdent &ident + = static_cast (*field.get ()); + + size_t offs = 0; + ok + = variant->lookup_field (ident.get_identifier (), nullptr, &offs); + rust_assert (ok); + + tree binding = error_mark_node; + if (adt->is_enum ()) + { + tree variant_accessor + = ctx->get_backend ()->struct_field_expression ( + match_scrutinee_expr, variant_index, ident.get_locus ()); + + // we are offsetting by + 1 here since the first field in the + // record is always the discriminator + binding = ctx->get_backend ()->struct_field_expression ( + variant_accessor, offs + 1, ident.get_locus ()); + } + else + { + tree variant_accessor = match_scrutinee_expr; + binding = ctx->get_backend ()->struct_field_expression ( + variant_accessor, offs, ident.get_locus ()); + } + + ctx->insert_pattern_binding (ident.get_mappings ().get_hirid (), + binding); + } + break; + } + } +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-pattern.h b/gcc/rust/backend/rust-compile-pattern.h new file mode 100644 index 00000000000..0eb5d61249b --- /dev/null +++ b/gcc/rust/backend/rust-compile-pattern.h @@ -0,0 +1,95 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompilePatternCaseLabelExpr : public HIRCompileBase, + public HIR::HIRPatternVisitor +{ +public: + static tree Compile (HIR::Pattern *pattern, tree associated_case_label, + Context *ctx) + { + CompilePatternCaseLabelExpr compiler (ctx, associated_case_label); + pattern->accept_vis (compiler); + return compiler.case_label_expr; + } + + void visit (HIR::PathInExpression &pattern) override; + void visit (HIR::StructPattern &pattern) override; + void visit (HIR::TupleStructPattern &pattern) override; + void visit (HIR::WildcardPattern &pattern) override; + void visit (HIR::RangePattern &pattern) override; + + // Empty visit for unused Pattern HIR nodes. + void visit (HIR::GroupedPattern &) override {} + void visit (HIR::IdentifierPattern &) override {} + void visit (HIR::LiteralPattern &) override; + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::ReferencePattern &) override {} + void visit (HIR::SlicePattern &) override {} + void visit (HIR::TuplePattern &) override {} + + CompilePatternCaseLabelExpr (Context *ctx, tree associated_case_label) + : HIRCompileBase (ctx), case_label_expr (error_mark_node), + associated_case_label (associated_case_label) + {} + + tree case_label_expr; + tree associated_case_label; +}; + +class CompilePatternBindings : public HIRCompileBase, + public HIR::HIRPatternVisitor +{ +public: + static void Compile (HIR::Pattern *pattern, tree match_scrutinee_expr, + Context *ctx) + { + CompilePatternBindings compiler (ctx, match_scrutinee_expr); + pattern->accept_vis (compiler); + } + + void visit (HIR::StructPattern &pattern) override; + void visit (HIR::TupleStructPattern &pattern) override; + + // Empty visit for unused Pattern HIR nodes. 
+ void visit (HIR::GroupedPattern &) override {} + void visit (HIR::IdentifierPattern &) override {} + void visit (HIR::LiteralPattern &) override {} + void visit (HIR::PathInExpression &) override {} + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::RangePattern &) override {} + void visit (HIR::ReferencePattern &) override {} + void visit (HIR::SlicePattern &) override {} + void visit (HIR::TuplePattern &) override {} + void visit (HIR::WildcardPattern &) override {} + +protected: + CompilePatternBindings (Context *ctx, tree match_scrutinee_expr) + : HIRCompileBase (ctx), match_scrutinee_expr (match_scrutinee_expr) + {} + + tree match_scrutinee_expr; +}; + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-resolve-path.cc b/gcc/rust/backend/rust-compile-resolve-path.cc new file mode 100644 index 00000000000..4fb3d540257 --- /dev/null +++ b/gcc/rust/backend/rust-compile-resolve-path.cc @@ -0,0 +1,301 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile-resolve-path.h" +#include "rust-compile-intrinsic.h" +#include "rust-compile-item.h" +#include "rust-compile-implitem.h" +#include "rust-compile-expr.h" +#include "rust-hir-trait-resolve.h" +#include "rust-hir-path-probe.h" +#include "rust-compile-extern.h" +#include "rust-constexpr.h" + +namespace Rust { +namespace Compile { + +void +ResolvePathRef::visit (HIR::QualifiedPathInExpression &expr) +{ + resolved = resolve (expr.get_final_segment ().get_segment (), + expr.get_mappings (), expr.get_locus (), true); +} + +void +ResolvePathRef::visit (HIR::PathInExpression &expr) +{ + resolved = resolve (expr.get_final_segment ().get_segment (), + expr.get_mappings (), expr.get_locus (), false); +} + +tree +ResolvePathRef::resolve (const HIR::PathIdentSegment &final_segment, + const Analysis::NodeMapping &mappings, + Location expr_locus, bool is_qualified_path) +{ + TyTy::BaseType *lookup = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type (mappings.get_hirid (), &lookup); + rust_assert (ok); + + // need to look up the reference for this identifier + NodeId ref_node_id = UNKNOWN_NODEID; + if (!ctx->get_resolver ()->lookup_resolved_name (mappings.get_nodeid (), + &ref_node_id)) + { + // this can fail because it might be a Constructor for something + // in that case the caller should attempt ResolvePathType::Compile + + // it might be an enum data-less enum variant + if (lookup->get_kind () != TyTy::TypeKind::ADT) + return error_mark_node; + + TyTy::ADTType *adt = static_cast (lookup); + + // it might be a unit-struct + if (adt->is_unit ()) + { + return ctx->get_backend ()->unit_expression (); + } + + if (!adt->is_enum ()) + return error_mark_node; + + HirId variant_id; + if (!ctx->get_tyctx ()->lookup_variant_definition (mappings.get_hirid (), + &variant_id)) + return error_mark_node; + + int union_disriminator = -1; + TyTy::VariantDef *variant = nullptr; + if (!adt->lookup_variant_by_id (variant_id, &variant, + &union_disriminator)) + return 
error_mark_node; + + // this can only be for discriminant variants the others are built up + // using call-expr or struct-init + rust_assert (variant->get_variant_type () + == TyTy::VariantDef::VariantType::NUM); + + // we need the actual gcc type + tree compiled_adt_type = TyTyResolveCompile::compile (ctx, adt); + + // make the ctor for the union + HIR::Expr *discrim_expr = variant->get_discriminant (); + tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx); + tree folded_discrim_expr = fold_expr (discrim_expr_node); + tree qualifier = folded_discrim_expr; + + return ctx->get_backend ()->constructor_expression (compiled_adt_type, + true, {qualifier}, + union_disriminator, + expr_locus); + } + + HirId ref; + if (!ctx->get_mappings ()->lookup_node_to_hir (ref_node_id, &ref)) + { + rust_error_at (expr_locus, "reverse call path lookup failure"); + return error_mark_node; + } + + // might be a constant + tree constant_expr; + if (ctx->lookup_const_decl (ref, &constant_expr)) + { + TREE_USED (constant_expr) = 1; + return constant_expr; + } + + // this might be a variable reference or a function reference + Bvariable *var = nullptr; + if (ctx->lookup_var_decl (ref, &var)) + { + // TREE_USED is setup in the gcc abstraction here + return ctx->get_backend ()->var_expression (var, expr_locus); + } + + // might be a match pattern binding + tree binding = error_mark_node; + if (ctx->lookup_pattern_binding (ref, &binding)) + { + TREE_USED (binding) = 1; + return binding; + } + + // it might be a function call + if (lookup->get_kind () == TyTy::TypeKind::FNDEF) + { + TyTy::FnType *fntype = static_cast (lookup); + tree fn = NULL_TREE; + if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn)) + { + TREE_USED (fn) = 1; + return address_expression (fn, expr_locus); + } + else if (fntype->get_abi () == ABI::INTRINSIC) + { + Intrinsics compile (ctx); + fn = compile.compile (fntype); + TREE_USED (fn) = 1; + return address_expression (fn, expr_locus); + } + } + + // let 
the query system figure it out + tree resolved_item = query_compile (ref, lookup, final_segment, mappings, + expr_locus, is_qualified_path); + if (resolved_item != error_mark_node) + { + TREE_USED (resolved_item) = 1; + } + return resolved_item; +} + +tree +HIRCompileBase::query_compile (HirId ref, TyTy::BaseType *lookup, + const HIR::PathIdentSegment &final_segment, + const Analysis::NodeMapping &mappings, + Location expr_locus, bool is_qualified_path) +{ + HIR::Item *resolved_item = ctx->get_mappings ()->lookup_hir_item (ref); + HirId parent_block; + HIR::ExternalItem *resolved_extern_item + = ctx->get_mappings ()->lookup_hir_extern_item (ref, &parent_block); + bool is_hir_item = resolved_item != nullptr; + bool is_hir_extern_item = resolved_extern_item != nullptr; + if (is_hir_item) + { + if (!lookup->has_subsititions_defined ()) + return CompileItem::compile (resolved_item, ctx, nullptr, true, + expr_locus); + else + return CompileItem::compile (resolved_item, ctx, lookup, true, + expr_locus); + } + else if (is_hir_extern_item) + { + if (!lookup->has_subsititions_defined ()) + return CompileExternItem::compile (resolved_extern_item, ctx, nullptr, + true, expr_locus); + else + return CompileExternItem::compile (resolved_extern_item, ctx, lookup, + true, expr_locus); + } + else + { + HirId parent_impl_id = UNKNOWN_HIRID; + HIR::ImplItem *resolved_item + = ctx->get_mappings ()->lookup_hir_implitem (ref, &parent_impl_id); + bool is_impl_item = resolved_item != nullptr; + if (is_impl_item) + { + rust_assert (parent_impl_id != UNKNOWN_HIRID); + HIR::Item *impl_ref + = ctx->get_mappings ()->lookup_hir_item (parent_impl_id); + rust_assert (impl_ref != nullptr); + HIR::ImplBlock *impl = static_cast (impl_ref); + + TyTy::BaseType *self = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + impl->get_type ()->get_mappings ().get_hirid (), &self); + rust_assert (ok); + + if (!lookup->has_subsititions_defined ()) + return CompileInherentImplItem::Compile (resolved_item, 
ctx, + nullptr, true, expr_locus); + else + return CompileInherentImplItem::Compile (resolved_item, ctx, lookup, + true, expr_locus); + } + else + { + // it might be resolved to a trait item + HIR::TraitItem *trait_item + = ctx->get_mappings ()->lookup_hir_trait_item (ref); + HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping ( + trait_item->get_mappings ().get_hirid ()); + + Resolver::TraitReference *trait_ref + = &Resolver::TraitReference::error_node (); + bool ok = ctx->get_tyctx ()->lookup_trait_reference ( + trait->get_mappings ().get_defid (), &trait_ref); + rust_assert (ok); + + TyTy::BaseType *receiver = nullptr; + ok = ctx->get_tyctx ()->lookup_receiver (mappings.get_hirid (), + &receiver); + rust_assert (ok); + + if (receiver->get_kind () == TyTy::TypeKind::PARAM) + { + TyTy::ParamType *p = static_cast (receiver); + receiver = p->resolve (); + } + + // the type resolver can only resolve type bounds to their trait + // item so its up to us to figure out if this path should resolve + // to an trait-impl-block-item or if it can be defaulted to the + // trait-impl-item's definition + std::vector candidates + = Resolver::PathProbeImplTrait::Probe (receiver, final_segment, + trait_ref); + if (candidates.size () == 0) + { + // this means we are defaulting back to the trait_item if + // possible + Resolver::TraitItemReference *trait_item_ref = nullptr; + bool ok = trait_ref->lookup_hir_trait_item (*trait_item, + &trait_item_ref); + rust_assert (ok); // found + rust_assert (trait_item_ref->is_optional ()); // has definition + + return CompileTraitItem::Compile ( + trait_item_ref->get_hir_trait_item (), ctx, lookup, true, + expr_locus); + } + else + { + Resolver::PathProbeCandidate &candidate = candidates.at (0); + rust_assert (candidate.is_impl_candidate ()); + + HIR::ImplBlock *impl = candidate.item.impl.parent; + HIR::ImplItem *impl_item = candidate.item.impl.impl_item; + + TyTy::BaseType *self = nullptr; + bool ok = ctx->get_tyctx 
()->lookup_type ( + impl->get_type ()->get_mappings ().get_hirid (), &self); + rust_assert (ok); + + if (!lookup->has_subsititions_defined ()) + return CompileInherentImplItem::Compile (impl_item, ctx, + nullptr, true, + expr_locus); + else + return CompileInherentImplItem::Compile (impl_item, ctx, lookup, + true, expr_locus); + + lookup->set_ty_ref (impl_item->get_impl_mappings ().get_hirid ()); + } + } + } + + return error_mark_node; +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-resolve-path.h b/gcc/rust/backend/rust-compile-resolve-path.h new file mode 100644 index 00000000000..f0360bdc739 --- /dev/null +++ b/gcc/rust/backend/rust-compile-resolve-path.h @@ -0,0 +1,73 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_RESOLVE_PATH +#define RUST_COMPILE_RESOLVE_PATH + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class ResolvePathRef : public HIRCompileBase, public HIR::HIRPatternVisitor +{ +public: + static tree Compile (HIR::QualifiedPathInExpression &expr, Context *ctx) + { + ResolvePathRef resolver (ctx); + expr.accept_vis (resolver); + return resolver.resolved; + } + + static tree Compile (HIR::PathInExpression &expr, Context *ctx) + { + ResolvePathRef resolver (ctx); + expr.accept_vis (resolver); + return resolver.resolved; + } + + void visit (HIR::PathInExpression &expr) override; + void visit (HIR::QualifiedPathInExpression &expr) override; + + // Empty visit for unused Pattern HIR nodes. + void visit (HIR::GroupedPattern &) override {} + void visit (HIR::IdentifierPattern &) override {} + void visit (HIR::LiteralPattern &) override {} + void visit (HIR::RangePattern &) override {} + void visit (HIR::ReferencePattern &) override {} + void visit (HIR::SlicePattern &) override {} + void visit (HIR::StructPattern &) override {} + void visit (HIR::TuplePattern &) override {} + void visit (HIR::TupleStructPattern &) override {} + void visit (HIR::WildcardPattern &) override {} + + ResolvePathRef (Context *ctx) + : HIRCompileBase (ctx), resolved (error_mark_node) + {} + + tree resolve (const HIR::PathIdentSegment &final_segment, + const Analysis::NodeMapping &mappings, Location locus, + bool is_qualified_path); + + tree resolved; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_RESOLVE_PATH diff --git a/gcc/rust/backend/rust-compile-stmt.cc b/gcc/rust/backend/rust-compile-stmt.cc new file mode 100644 index 00000000000..bfb25f12980 --- /dev/null +++ b/gcc/rust/backend/rust-compile-stmt.cc @@ -0,0 +1,115 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. 
+ +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-stmt.h" +#include "rust-compile-expr.h" + +namespace Rust { +namespace Compile { + +CompileStmt::CompileStmt (Context *ctx) + : HIRCompileBase (ctx), translated (nullptr) +{} + +tree +CompileStmt::Compile (HIR::Stmt *stmt, Context *ctx) +{ + CompileStmt compiler (ctx); + stmt->accept_vis (compiler); + return compiler.translated; +} + +void +CompileStmt::visit (HIR::ExprStmtWithBlock &stmt) +{ + translated = CompileExpr::Compile (stmt.get_expr (), ctx); +} + +void +CompileStmt::visit (HIR::ExprStmtWithoutBlock &stmt) +{ + translated = CompileExpr::Compile (stmt.get_expr (), ctx); +} + +void +CompileStmt::visit (HIR::LetStmt &stmt) +{ + // nothing to do + if (!stmt.has_init_expr ()) + return; + + const HIR::Pattern &stmt_pattern = *stmt.get_pattern (); + HirId stmt_id = stmt_pattern.get_pattern_mappings ().get_hirid (); + + TyTy::BaseType *ty = nullptr; + if (!ctx->get_tyctx ()->lookup_type (stmt_id, &ty)) + { + // FIXME this should be an assertion instead + rust_fatal_error (stmt.get_locus (), + "failed to lookup variable declaration type"); + return; + } + + Bvariable *var = nullptr; + if (!ctx->lookup_var_decl (stmt_id, &var)) + { + // FIXME this should be an assertion instead and use error mark node + rust_fatal_error (stmt.get_locus (), + "failed to lookup compiled variable declaration"); + return; + } + + tree init = CompileExpr::Compile 
(stmt.get_init_expr (), ctx); + // FIXME use error_mark_node, check that CompileExpr returns error_mark_node + // on failure and make this an assertion + if (init == nullptr) + return; + + TyTy::BaseType *actual = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + stmt.get_init_expr ()->get_mappings ().get_hirid (), &actual); + rust_assert (ok); + tree stmt_type = TyTyResolveCompile::compile (ctx, ty); + + Location lvalue_locus = stmt.get_pattern ()->get_locus (); + Location rvalue_locus = stmt.get_init_expr ()->get_locus (); + TyTy::BaseType *expected = ty; + init = coercion_site (stmt.get_mappings ().get_hirid (), init, actual, + expected, lvalue_locus, rvalue_locus); + + auto fnctx = ctx->peek_fn (); + if (ty->is_unit ()) + { + ctx->add_statement (init); + + auto unit_type_init_expr + = ctx->get_backend ()->constructor_expression (stmt_type, false, {}, -1, + rvalue_locus); + auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var, + unit_type_init_expr); + ctx->add_statement (s); + } + else + { + auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var, init); + ctx->add_statement (s); + } +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-stmt.h b/gcc/rust/backend/rust-compile-stmt.h new file mode 100644 index 00000000000..a0ec8b26667 --- /dev/null +++ b/gcc/rust/backend/rust-compile-stmt.h @@ -0,0 +1,69 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_STMT +#define RUST_COMPILE_STMT + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileStmt : private HIRCompileBase, protected HIR::HIRStmtVisitor +{ +public: + static tree Compile (HIR::Stmt *stmt, Context *ctx); + + void visit (HIR::ExprStmtWithBlock &stmt) override; + void visit (HIR::ExprStmtWithoutBlock &stmt) override; + void visit (HIR::LetStmt &stmt) override; + + // Empty visit for unused Stmt HIR nodes. + void visit (HIR::TupleStruct &) override {} + void visit (HIR::EnumItem &) override {} + void visit (HIR::EnumItemTuple &) override {} + void visit (HIR::EnumItemStruct &) override {} + void visit (HIR::EnumItemDiscriminant &) override {} + void visit (HIR::TypePathSegmentFunction &) override {} + void visit (HIR::TypePath &) override {} + void visit (HIR::QualifiedPathInType &) override {} + void visit (HIR::Module &) override {} + void visit (HIR::ExternCrate &) override {} + void visit (HIR::UseDeclaration &) override {} + void visit (HIR::Function &) override {} + void visit (HIR::TypeAlias &) override {} + void visit (HIR::StructStruct &) override {} + void visit (HIR::Enum &) override {} + void visit (HIR::Union &) override {} + void visit (HIR::ConstantItem &) override {} + void visit (HIR::StaticItem &) override {} + void visit (HIR::Trait &) override {} + void visit (HIR::ImplBlock &) override {} + void visit (HIR::ExternBlock &) override {} + void visit (HIR::EmptyStmt &) override {} + +private: + CompileStmt (Context *ctx); + + tree translated; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_STMT diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.cc b/gcc/rust/backend/rust-compile-struct-field-expr.cc new file mode 100644 index 00000000000..c9a2811f611 --- /dev/null +++ 
b/gcc/rust/backend/rust-compile-struct-field-expr.cc @@ -0,0 +1,81 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-compile-struct-field-expr.h" +#include "rust-compile-expr.h" + +namespace Rust { +namespace Compile { + +CompileStructExprField::CompileStructExprField (Context *ctx) + : HIRCompileBase (ctx), translated (error_mark_node) +{} + +tree +CompileStructExprField::Compile (HIR::StructExprField *field, Context *ctx) +{ + CompileStructExprField compiler (ctx); + switch (field->get_kind ()) + { + case HIR::StructExprField::StructExprFieldKind::IDENTIFIER: + compiler.visit (static_cast (*field)); + break; + + case HIR::StructExprField::StructExprFieldKind::IDENTIFIER_VALUE: + compiler.visit ( + static_cast (*field)); + break; + + case HIR::StructExprField::StructExprFieldKind::INDEX_VALUE: + compiler.visit (static_cast (*field)); + break; + } + return compiler.translated; +} + +void +CompileStructExprField::visit (HIR::StructExprFieldIdentifierValue &field) +{ + translated = CompileExpr::Compile (field.get_value (), ctx); +} + +void +CompileStructExprField::visit (HIR::StructExprFieldIndexValue &field) +{ + translated = CompileExpr::Compile (field.get_value (), ctx); +} + +void +CompileStructExprField::visit (HIR::StructExprFieldIdentifier &field) +{ + // we can make the field look like a path expr to 
take advantage of existing + // code + + Analysis::NodeMapping mappings_copy1 = field.get_mappings (); + Analysis::NodeMapping mappings_copy2 = field.get_mappings (); + + HIR::PathIdentSegment ident_seg (field.get_field_name ()); + HIR::PathExprSegment seg (mappings_copy1, ident_seg, field.get_locus (), + HIR::GenericArgs::create_empty ()); + HIR::PathInExpression expr (mappings_copy2, {seg}, field.get_locus (), false, + {}); + translated = CompileExpr::Compile (&expr, ctx); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.h b/gcc/rust/backend/rust-compile-struct-field-expr.h new file mode 100644 index 00000000000..bc5da080dfe --- /dev/null +++ b/gcc/rust/backend/rust-compile-struct-field-expr.h @@ -0,0 +1,46 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_STRUCT_FIELD_EXPR +#define RUST_COMPILE_STRUCT_FIELD_EXPR + +#include "rust-compile-base.h" + +namespace Rust { +namespace Compile { + +class CompileStructExprField : private HIRCompileBase +{ +public: + static tree Compile (HIR::StructExprField *field, Context *ctx); + +protected: + void visit (HIR::StructExprFieldIdentifierValue &field); + void visit (HIR::StructExprFieldIndexValue &field); + void visit (HIR::StructExprFieldIdentifier &field); + +private: + CompileStructExprField (Context *ctx); + + tree translated; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_STRUCT_FIELD_EXPR diff --git a/gcc/rust/backend/rust-compile-type.cc b/gcc/rust/backend/rust-compile-type.cc new file mode 100644 index 00000000000..eced909673e --- /dev/null +++ b/gcc/rust/backend/rust-compile-type.cc @@ -0,0 +1,713 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile-type.h" +#include "rust-compile-expr.h" +#include "rust-constexpr.h" + +#include "tree.h" + +namespace Rust { +namespace Compile { + +static const std::string RUST_ENUM_DISR_FIELD_NAME = "RUST$ENUM$DISR"; + +TyTyResolveCompile::TyTyResolveCompile (Context *ctx, bool trait_object_mode) + : ctx (ctx), trait_object_mode (trait_object_mode), + translated (error_mark_node), recurisve_ops (0) +{} + +tree +TyTyResolveCompile::compile (Context *ctx, const TyTy::BaseType *ty, + bool trait_object_mode) +{ + TyTyResolveCompile compiler (ctx, trait_object_mode); + ty->accept_vis (compiler); + + if (compiler.translated != error_mark_node + && TYPE_NAME (compiler.translated) != NULL) + { + // canonicalize the type + compiler.translated = ctx->insert_compiled_type (compiler.translated); + } + + return compiler.translated; +} + +// see: gcc/c/c-decl.cc:8230-8241 +// https://github.com/Rust-GCC/gccrs/blob/0024bc2f028369b871a65ceb11b2fddfb0f9c3aa/gcc/c/c-decl.c#L8229-L8241 +tree +TyTyResolveCompile::get_implicit_enumeral_node_type (Context *ctx) +{ + // static tree enum_node = NULL_TREE; + // if (enum_node == NULL_TREE) + // { + // enum_node = make_node (ENUMERAL_TYPE); + // SET_TYPE_MODE (enum_node, TYPE_MODE (unsigned_type_node)); + // SET_TYPE_ALIGN (enum_node, TYPE_ALIGN (unsigned_type_node)); + // TYPE_USER_ALIGN (enum_node) = 0; + // TYPE_UNSIGNED (enum_node) = 1; + // TYPE_PRECISION (enum_node) = TYPE_PRECISION (unsigned_type_node); + // TYPE_MIN_VALUE (enum_node) = TYPE_MIN_VALUE (unsigned_type_node); + // TYPE_MAX_VALUE (enum_node) = TYPE_MAX_VALUE (unsigned_type_node); + + // // tree identifier = ctx->get_backend ()->get_identifier_node + // // ("enumeral"); tree enum_decl + // // = build_decl (BUILTINS_LOCATION, TYPE_DECL, identifier, + // enum_node); + // // TYPE_NAME (enum_node) = enum_decl; + // } + // return enum_node; + + static tree enum_node = NULL_TREE; + if (enum_node == NULL_TREE) + { + enum_node = ctx->get_backend ()->named_type ( + 
"enumeral", ctx->get_backend ()->integer_type (false, 64), + Linemap::predeclared_location ()); + } + return enum_node; +} + +void +TyTyResolveCompile::visit (const TyTy::ErrorType &) +{ + translated = error_mark_node; +} + +void +TyTyResolveCompile::visit (const TyTy::InferType &) +{ + translated = error_mark_node; +} + +void +TyTyResolveCompile::visit (const TyTy::ClosureType &) +{ + gcc_unreachable (); +} + +void +TyTyResolveCompile::visit (const TyTy::ProjectionType &type) +{ + type.get ()->accept_vis (*this); +} + +void +TyTyResolveCompile::visit (const TyTy::PlaceholderType &type) +{ + type.resolve ()->accept_vis (*this); +} + +void +TyTyResolveCompile::visit (const TyTy::ParamType ¶m) +{ + if (recurisve_ops++ >= rust_max_recursion_depth) + { + rust_error_at (Location (), + "% count exceeds limit of %i (use " + "% to increase the limit)", + rust_max_recursion_depth); + translated = error_mark_node; + return; + } + + param.resolve ()->accept_vis (*this); +} + +void +TyTyResolveCompile::visit (const TyTy::FnType &type) +{ + Backend::typed_identifier receiver; + std::vector parameters; + std::vector results; + + if (!type.get_return_type ()->is_unit ()) + { + auto hir_type = type.get_return_type (); + auto ret = TyTyResolveCompile::compile (ctx, hir_type, trait_object_mode); + results.push_back (Backend::typed_identifier ( + "_", ret, + ctx->get_mappings ()->lookup_location (hir_type->get_ref ()))); + } + + for (auto ¶m_pair : type.get_params ()) + { + auto param_tyty = param_pair.second; + auto compiled_param_type + = TyTyResolveCompile::compile (ctx, param_tyty, trait_object_mode); + + auto compiled_param = Backend::typed_identifier ( + param_pair.first->as_string (), compiled_param_type, + ctx->get_mappings ()->lookup_location (param_tyty->get_ref ())); + + parameters.push_back (compiled_param); + } + + if (!type.is_varadic ()) + translated + = ctx->get_backend ()->function_type (receiver, parameters, results, NULL, + type.get_ident ().locus); + else + 
translated + = ctx->get_backend ()->function_type_varadic (receiver, parameters, + results, NULL, + type.get_ident ().locus); +} + +void +TyTyResolveCompile::visit (const TyTy::FnPtr &type) +{ + tree result_type = TyTyResolveCompile::compile (ctx, type.get_return_type ()); + + std::vector parameters; + + auto ¶ms = type.get_params (); + for (auto &p : params) + { + tree pty = TyTyResolveCompile::compile (ctx, p.get_tyty ()); + parameters.push_back (pty); + } + + translated = ctx->get_backend ()->function_ptr_type (result_type, parameters, + type.get_ident ().locus); +} + +void +TyTyResolveCompile::visit (const TyTy::ADTType &type) +{ + tree type_record = error_mark_node; + if (!type.is_enum ()) + { + rust_assert (type.number_of_variants () == 1); + + TyTy::VariantDef &variant = *type.get_variants ().at (0); + std::vector fields; + for (size_t i = 0; i < variant.num_fields (); i++) + { + const TyTy::StructFieldType *field = variant.get_field_at_index (i); + tree compiled_field_ty + = TyTyResolveCompile::compile (ctx, field->get_field_type ()); + + Backend::typed_identifier f (field->get_name (), compiled_field_ty, + ctx->get_mappings ()->lookup_location ( + type.get_ty_ref ())); + fields.push_back (std::move (f)); + } + + type_record = type.is_union () + ? 
ctx->get_backend ()->union_type (fields) + : ctx->get_backend ()->struct_type (fields); + } + else + { + // see: + // https://github.com/bminor/binutils-gdb/blob/527b8861cd472385fa9160a91dd6d65a25c41987/gdb/dwarf2/read.c#L9010-L9241 + // + // enums are actually a big union so for example the rust enum: + // + // enum AnEnum { + // A, + // B, + // C (char), + // D { x: i64, y: i64 }, + // } + // + // we actually turn this into + // + // union { + // struct A { int RUST$ENUM$DISR; }; <- this is a data-less variant + // struct B { int RUST$ENUM$DISR; }; <- this is a data-less variant + // struct C { int RUST$ENUM$DISR; char __0; }; + // struct D { int RUST$ENUM$DISR; i64 x; i64 y; }; + // } + // + // Ada, qual_union_types might still work for this but I am not 100% sure. + // I ran into some issues lets reuse our normal union and ask Ada people + // about it. + + std::vector variant_records; + for (auto &variant : type.get_variants ()) + { + std::vector fields; + + // add in the qualifier field for the variant + tree enumeral_type + = TyTyResolveCompile::get_implicit_enumeral_node_type (ctx); + Backend::typed_identifier f (RUST_ENUM_DISR_FIELD_NAME, enumeral_type, + ctx->get_mappings ()->lookup_location ( + variant->get_id ())); + fields.push_back (std::move (f)); + + // compile the rest of the fields + for (size_t i = 0; i < variant->num_fields (); i++) + { + const TyTy::StructFieldType *field + = variant->get_field_at_index (i); + tree compiled_field_ty + = TyTyResolveCompile::compile (ctx, field->get_field_type ()); + + std::string field_name = field->get_name (); + if (variant->get_variant_type () + == TyTy::VariantDef::VariantType::TUPLE) + field_name = "__" + field->get_name (); + + Backend::typed_identifier f ( + field_name, compiled_field_ty, + ctx->get_mappings ()->lookup_location (type.get_ty_ref ())); + fields.push_back (std::move (f)); + } + + tree variant_record = ctx->get_backend ()->struct_type (fields); + tree named_variant_record = ctx->get_backend 
()->named_type ( + variant->get_ident ().path.get (), variant_record, + variant->get_ident ().locus); + + // set the qualifier to be a builtin + DECL_ARTIFICIAL (TYPE_FIELDS (variant_record)) = 1; + + // add them to the list + variant_records.push_back (named_variant_record); + } + + // now we need to make the actual union, but first we need to make + // named_type TYPE_DECL's out of the variants + + size_t i = 0; + std::vector enum_fields; + for (auto &variant_record : variant_records) + { + TyTy::VariantDef *variant = type.get_variants ().at (i++); + std::string implicit_variant_name = variant->get_identifier (); + + Backend::typed_identifier f (implicit_variant_name, variant_record, + ctx->get_mappings ()->lookup_location ( + type.get_ty_ref ())); + enum_fields.push_back (std::move (f)); + } + + // finally make the union or the enum + type_record = ctx->get_backend ()->union_type (enum_fields); + } + + // Handle repr options + // TODO: "packed" should only narrow type alignment and "align" should only + // widen it. Do we need to check and enforce this here, or is it taken care of + // later on in the gcc middle-end? 
+ TyTy::ADTType::ReprOptions repr = type.get_repr_options (); + if (repr.pack) + { + TYPE_PACKED (type_record) = 1; + if (repr.pack > 1) + { + SET_TYPE_ALIGN (type_record, repr.pack * 8); + TYPE_USER_ALIGN (type_record) = 1; + } + } + else if (repr.align) + { + SET_TYPE_ALIGN (type_record, repr.align * 8); + TYPE_USER_ALIGN (type_record) = 1; + } + + std::string named_struct_str + = type.get_ident ().path.get () + type.subst_as_string (); + translated = ctx->get_backend ()->named_type (named_struct_str, type_record, + type.get_ident ().locus); +} + +void +TyTyResolveCompile::visit (const TyTy::TupleType &type) +{ + if (type.num_fields () == 0) + { + translated = ctx->get_backend ()->unit_type (); + return; + } + + // create implicit struct + std::vector fields; + for (size_t i = 0; i < type.num_fields (); i++) + { + TyTy::BaseType *field = type.get_field (i); + tree compiled_field_ty = TyTyResolveCompile::compile (ctx, field); + + // rustc uses the convention __N, where N is an integer, to + // name the fields of a tuple. We follow this as well, + // because this is used by GDB. One further reason to prefer + // this, rather than simply emitting the integer, is that this + // approach makes it simpler to use a C-only debugger, or + // GDB's C mode, when debugging Rust. 
+ Backend::typed_identifier f ("__" + std::to_string (i), compiled_field_ty, + ctx->get_mappings ()->lookup_location ( + type.get_ty_ref ())); + fields.push_back (std::move (f)); + } + + tree struct_type_record = ctx->get_backend ()->struct_type (fields); + translated + = ctx->get_backend ()->named_type (type.as_string (), struct_type_record, + type.get_ident ().locus); +} + +void +TyTyResolveCompile::visit (const TyTy::ArrayType &type) +{ + tree element_type + = TyTyResolveCompile::compile (ctx, type.get_element_type ()); + tree capacity_expr = CompileExpr::Compile (&type.get_capacity_expr (), ctx); + tree folded_capacity_expr = fold_expr (capacity_expr); + + translated + = ctx->get_backend ()->array_type (element_type, folded_capacity_expr); +} + +void +TyTyResolveCompile::visit (const TyTy::SliceType &type) +{ + tree type_record = create_slice_type_record (type); + + std::string named_struct_str + = std::string ("[") + type.get_element_type ()->get_name () + "]"; + translated = ctx->get_backend ()->named_type (named_struct_str, type_record, + type.get_ident ().locus); +} + +void +TyTyResolveCompile::visit (const TyTy::BoolType &type) +{ + translated + = ctx->get_backend ()->named_type ("bool", + ctx->get_backend ()->bool_type (), + Linemap::predeclared_location ()); +} + +void +TyTyResolveCompile::visit (const TyTy::IntType &type) +{ + switch (type.get_int_kind ()) + { + case TyTy::IntType::I8: + translated = ctx->get_backend ()->named_type ( + "i8", ctx->get_backend ()->integer_type (false, 8), + Linemap::predeclared_location ()); + return; + + case TyTy::IntType::I16: + translated = ctx->get_backend ()->named_type ( + "i16", ctx->get_backend ()->integer_type (false, 16), + Linemap::predeclared_location ()); + return; + + case TyTy::IntType::I32: + translated = ctx->get_backend ()->named_type ( + "i32", ctx->get_backend ()->integer_type (false, 32), + Linemap::predeclared_location ()); + return; + + case TyTy::IntType::I64: + translated = ctx->get_backend 
()->named_type ( + "i64", ctx->get_backend ()->integer_type (false, 64), + Linemap::predeclared_location ()); + return; + + case TyTy::IntType::I128: + translated = ctx->get_backend ()->named_type ( + "i128", ctx->get_backend ()->integer_type (false, 128), + Linemap::predeclared_location ()); + return; + } +} + +void +TyTyResolveCompile::visit (const TyTy::UintType &type) +{ + switch (type.get_uint_kind ()) + { + case TyTy::UintType::U8: + translated = ctx->get_backend ()->named_type ( + "u8", ctx->get_backend ()->integer_type (true, 8), + Linemap::predeclared_location ()); + return; + + case TyTy::UintType::U16: + translated = ctx->get_backend ()->named_type ( + "u16", ctx->get_backend ()->integer_type (true, 16), + Linemap::predeclared_location ()); + return; + + case TyTy::UintType::U32: + translated = ctx->get_backend ()->named_type ( + "u32", ctx->get_backend ()->integer_type (true, 32), + Linemap::predeclared_location ()); + return; + + case TyTy::UintType::U64: + translated = ctx->get_backend ()->named_type ( + "u64", ctx->get_backend ()->integer_type (true, 64), + Linemap::predeclared_location ()); + return; + + case TyTy::UintType::U128: + translated = ctx->get_backend ()->named_type ( + "u128", ctx->get_backend ()->integer_type (true, 128), + Linemap::predeclared_location ()); + return; + } +} + +void +TyTyResolveCompile::visit (const TyTy::FloatType &type) +{ + switch (type.get_float_kind ()) + { + case TyTy::FloatType::F32: + translated + = ctx->get_backend ()->named_type ("f32", + ctx->get_backend ()->float_type (32), + Linemap::predeclared_location ()); + return; + + case TyTy::FloatType::F64: + translated + = ctx->get_backend ()->named_type ("f64", + ctx->get_backend ()->float_type (64), + Linemap::predeclared_location ()); + return; + } +} + +void +TyTyResolveCompile::visit (const TyTy::USizeType &type) +{ + translated = ctx->get_backend ()->named_type ( + "usize", + ctx->get_backend ()->integer_type ( + true, ctx->get_backend ()->get_pointer_size 
()), + Linemap::predeclared_location ()); +} + +void +TyTyResolveCompile::visit (const TyTy::ISizeType &type) +{ + translated = ctx->get_backend ()->named_type ( + "isize", + ctx->get_backend ()->integer_type ( + false, ctx->get_backend ()->get_pointer_size ()), + Linemap::predeclared_location ()); +} + +void +TyTyResolveCompile::visit (const TyTy::CharType &type) +{ + translated + = ctx->get_backend ()->named_type ("char", + ctx->get_backend ()->wchar_type (), + Linemap::predeclared_location ()); +} + +void +TyTyResolveCompile::visit (const TyTy::ReferenceType &type) +{ + const TyTy::SliceType *slice = nullptr; + const TyTy::StrType *str = nullptr; + if (type.is_dyn_slice_type (&slice)) + { + tree type_record = create_slice_type_record (*slice); + std::string dyn_slice_type_str + = std::string (type.is_mutable () ? "&mut " : "&") + "[" + + slice->get_element_type ()->get_name () + "]"; + + translated + = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record, + slice->get_locus ()); + + return; + } + else if (type.is_dyn_str_type (&str)) + { + tree type_record = create_str_type_record (*str); + std::string dyn_str_type_str + = std::string (type.is_mutable () ? 
"&mut " : "&") + "str"; + + translated + = ctx->get_backend ()->named_type (dyn_str_type_str, type_record, + str->get_locus ()); + + return; + } + + tree base_compiled_type + = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode); + if (type.is_mutable ()) + { + translated = ctx->get_backend ()->reference_type (base_compiled_type); + } + else + { + auto base = ctx->get_backend ()->immutable_type (base_compiled_type); + translated = ctx->get_backend ()->reference_type (base); + } +} + +void +TyTyResolveCompile::visit (const TyTy::PointerType &type) +{ + const TyTy::SliceType *slice = nullptr; + const TyTy::StrType *str = nullptr; + if (type.is_dyn_slice_type (&slice)) + { + tree type_record = create_slice_type_record (*slice); + std::string dyn_slice_type_str + = std::string (type.is_mutable () ? "*mut " : "*const ") + "[" + + slice->get_element_type ()->get_name () + "]"; + + translated + = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record, + slice->get_locus ()); + + return; + } + else if (type.is_dyn_str_type (&str)) + { + tree type_record = create_str_type_record (*str); + std::string dyn_str_type_str + = std::string (type.is_mutable () ? 
"*mut " : "*const ") + "str"; + + translated + = ctx->get_backend ()->named_type (dyn_str_type_str, type_record, + str->get_locus ()); + + return; + } + + tree base_compiled_type + = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode); + if (type.is_mutable ()) + { + translated = ctx->get_backend ()->pointer_type (base_compiled_type); + } + else + { + auto base = ctx->get_backend ()->immutable_type (base_compiled_type); + translated = ctx->get_backend ()->pointer_type (base); + } +} + +void +TyTyResolveCompile::visit (const TyTy::StrType &type) +{ + tree raw_str = create_str_type_record (type); + translated + = ctx->get_backend ()->named_type ("str", raw_str, + Linemap::predeclared_location ()); +} + +void +TyTyResolveCompile::visit (const TyTy::NeverType &) +{ + translated = ctx->get_backend ()->unit_type (); +} + +void +TyTyResolveCompile::visit (const TyTy::DynamicObjectType &type) +{ + if (trait_object_mode) + { + translated = ctx->get_backend ()->integer_type ( + true, ctx->get_backend ()->get_pointer_size ()); + return; + } + + // create implicit struct + auto items = type.get_object_items (); + std::vector fields; + + tree uint = ctx->get_backend ()->integer_type ( + true, ctx->get_backend ()->get_pointer_size ()); + tree uintptr_ty = build_pointer_type (uint); + + Backend::typed_identifier f ("pointer", uintptr_ty, + ctx->get_mappings ()->lookup_location ( + type.get_ty_ref ())); + fields.push_back (std::move (f)); + + tree vtable_size = build_int_cst (size_type_node, items.size ()); + tree vtable_type = ctx->get_backend ()->array_type (uintptr_ty, vtable_size); + Backend::typed_identifier vtf ("vtable", vtable_type, + ctx->get_mappings ()->lookup_location ( + type.get_ty_ref ())); + fields.push_back (std::move (vtf)); + + tree type_record = ctx->get_backend ()->struct_type (fields); + translated = ctx->get_backend ()->named_type (type.get_name (), type_record, + type.get_ident ().locus); +} + +tree 
+TyTyResolveCompile::create_slice_type_record (const TyTy::SliceType &type) +{ + // lookup usize + TyTy::BaseType *usize = nullptr; + bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize); + rust_assert (ok); + + tree element_type + = TyTyResolveCompile::compile (ctx, type.get_element_type ()); + tree data_field_ty = build_pointer_type (element_type); + Backend::typed_identifier data_field ("data", data_field_ty, + type.get_locus ()); + + tree len_field_ty = TyTyResolveCompile::compile (ctx, usize); + Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ()); + + tree record = ctx->get_backend ()->struct_type ({data_field, len_field}); + SLICE_FLAG (record) = 1; + TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record); + + return record; +} + +tree +TyTyResolveCompile::create_str_type_record (const TyTy::StrType &type) +{ + // lookup usize + TyTy::BaseType *usize = nullptr; + bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize); + rust_assert (ok); + + tree char_ptr = build_pointer_type (char_type_node); + tree const_char_type = build_qualified_type (char_ptr, TYPE_QUAL_CONST); + + tree element_type = const_char_type; + tree data_field_ty = build_pointer_type (element_type); + Backend::typed_identifier data_field ("data", data_field_ty, + type.get_locus ()); + + tree len_field_ty = TyTyResolveCompile::compile (ctx, usize); + Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ()); + + tree record = ctx->get_backend ()->struct_type ({data_field, len_field}); + SLICE_FLAG (record) = 1; + TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record); + + return record; +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile-type.h b/gcc/rust/backend/rust-compile-type.h new file mode 100644 index 00000000000..b52fd71bf6b --- /dev/null +++ b/gcc/rust/backend/rust-compile-type.h @@ -0,0 +1,79 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. 
+ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_COMPILE_TYPE +#define RUST_COMPILE_TYPE + +#include "rust-compile-context.h" + +namespace Rust { +namespace Compile { + +class TyTyResolveCompile : protected TyTy::TyConstVisitor +{ +public: + static tree compile (Context *ctx, const TyTy::BaseType *ty, + bool trait_object_mode = false); + + static tree get_implicit_enumeral_node_type (Context *ctx); + + void visit (const TyTy::InferType &) override; + void visit (const TyTy::ADTType &) override; + void visit (const TyTy::TupleType &) override; + void visit (const TyTy::FnType &) override; + void visit (const TyTy::FnPtr &) override; + void visit (const TyTy::ArrayType &) override; + void visit (const TyTy::SliceType &) override; + void visit (const TyTy::BoolType &) override; + void visit (const TyTy::IntType &) override; + void visit (const TyTy::UintType &) override; + void visit (const TyTy::FloatType &) override; + void visit (const TyTy::USizeType &) override; + void visit (const TyTy::ISizeType &) override; + void visit (const TyTy::ErrorType &) override; + void visit (const TyTy::CharType &) override; + void visit (const TyTy::ReferenceType &) override; + void visit (const TyTy::PointerType &) override; + void visit (const TyTy::ParamType &) override; + void visit (const TyTy::StrType &) override; + void visit (const TyTy::NeverType &) override; + void visit 
(const TyTy::PlaceholderType &) override; + void visit (const TyTy::ProjectionType &) override; + void visit (const TyTy::DynamicObjectType &) override; + void visit (const TyTy::ClosureType &) override; + +public: + static hashval_t type_hasher (tree type); + +protected: + tree create_slice_type_record (const TyTy::SliceType &type); + tree create_str_type_record (const TyTy::StrType &type); + +private: + TyTyResolveCompile (Context *ctx, bool trait_object_mode); + + Context *ctx; + bool trait_object_mode; + tree translated; + int recurisve_ops; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_TYPE diff --git a/gcc/rust/backend/rust-compile-var-decl.h b/gcc/rust/backend/rust-compile-var-decl.h new file mode 100644 index 00000000000..e2ee05b8163 --- /dev/null +++ b/gcc/rust/backend/rust-compile-var-decl.h @@ -0,0 +1,95 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_VAR_DECL +#define RUST_COMPILE_VAR_DECL + +#include "rust-compile-base.h" +#include "rust-hir-visitor.h" + +namespace Rust { +namespace Compile { + +class CompileVarDecl : public HIRCompileBase, public HIR::HIRPatternVisitor +{ + using HIR::HIRPatternVisitor::visit; + +public: + static ::Bvariable *compile (tree fndecl, tree translated_type, + HIR::Pattern *pattern, Context *ctx) + { + CompileVarDecl compiler (ctx, fndecl, translated_type); + pattern->accept_vis (compiler); + return compiler.compiled_variable; + } + + void visit (HIR::IdentifierPattern &pattern) override + { + if (!pattern.is_mut ()) + translated_type = ctx->get_backend ()->immutable_type (translated_type); + + compiled_variable + = ctx->get_backend ()->local_variable (fndecl, pattern.get_identifier (), + translated_type, NULL /*decl_var*/, + pattern.get_locus ()); + + HirId stmt_id = pattern.get_pattern_mappings ().get_hirid (); + ctx->insert_var_decl (stmt_id, compiled_variable); + } + + void visit (HIR::WildcardPattern &pattern) override + { + translated_type = ctx->get_backend ()->immutable_type (translated_type); + + compiled_variable + = ctx->get_backend ()->local_variable (fndecl, "_", translated_type, + NULL /*decl_var*/, + pattern.get_locus ()); + + HirId stmt_id = pattern.get_pattern_mappings ().get_hirid (); + ctx->insert_var_decl (stmt_id, compiled_variable); + } + + // Empty visit for unused Pattern HIR nodes. 
+ void visit (HIR::GroupedPattern &) override {} + void visit (HIR::LiteralPattern &) override {} + void visit (HIR::PathInExpression &) override {} + void visit (HIR::QualifiedPathInExpression &) override {} + void visit (HIR::RangePattern &) override {} + void visit (HIR::ReferencePattern &) override {} + void visit (HIR::SlicePattern &) override {} + void visit (HIR::StructPattern &) override {} + void visit (HIR::TuplePattern &) override {} + void visit (HIR::TupleStructPattern &) override {} + +private: + CompileVarDecl (Context *ctx, tree fndecl, tree translated_type) + : HIRCompileBase (ctx), fndecl (fndecl), translated_type (translated_type), + compiled_variable (ctx->get_backend ()->error_variable ()) + {} + + tree fndecl; + tree translated_type; + + Bvariable *compiled_variable; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_VAR_DECL diff --git a/gcc/rust/backend/rust-compile.cc b/gcc/rust/backend/rust-compile.cc new file mode 100644 index 00000000000..0ccb98d9e12 --- /dev/null +++ b/gcc/rust/backend/rust-compile.cc @@ -0,0 +1,414 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-compile.h" +#include "rust-compile-item.h" +#include "rust-compile-implitem.h" +#include "rust-compile-expr.h" +#include "rust-compile-struct-field-expr.h" +#include "rust-compile-stmt.h" +#include "rust-hir-trait-resolve.h" +#include "rust-hir-path-probe.h" +#include "rust-hir-type-bounds.h" +#include "rust-hir-dot-operator.h" +#include "rust-compile-block.h" + +namespace Rust { +namespace Compile { + +CompileCrate::CompileCrate (HIR::Crate &crate, Context *ctx) + : crate (crate), ctx (ctx) +{} + +CompileCrate::~CompileCrate () {} + +void +CompileCrate::Compile (HIR::Crate &crate, Context *ctx) +{ + CompileCrate c (crate, ctx); + c.go (); +} + +void +CompileCrate::go () +{ + for (auto &item : crate.items) + CompileItem::compile (item.get (), ctx); +} + +// Shared methods in compilation + +tree +HIRCompileBase::coercion_site (HirId id, tree rvalue, + const TyTy::BaseType *rval, + const TyTy::BaseType *lval, + Location lvalue_locus, Location rvalue_locus) +{ + std::vector *adjustments = nullptr; + bool ok = ctx->get_tyctx ()->lookup_autoderef_mappings (id, &adjustments); + if (ok) + { + rvalue = resolve_adjustements (*adjustments, rvalue, rvalue_locus); + } + + return coercion_site1 (rvalue, rval, lval, lvalue_locus, rvalue_locus); +} + +tree +HIRCompileBase::coercion_site1 (tree rvalue, const TyTy::BaseType *rval, + const TyTy::BaseType *lval, + Location lvalue_locus, Location rvalue_locus) +{ + if (rvalue == error_mark_node) + return error_mark_node; + + const TyTy::BaseType *actual = rval->destructure (); + const TyTy::BaseType *expected = lval->destructure (); + + if (expected->get_kind () == TyTy::TypeKind::REF) + { + // this is a dyn object + if (SLICE_TYPE_P (TREE_TYPE (rvalue))) + { + return rvalue; + } + + // bad coercion... 
of something to a reference + if (actual->get_kind () != TyTy::TypeKind::REF) + return error_mark_node; + + const TyTy::ReferenceType *exp + = static_cast (expected); + const TyTy::ReferenceType *act + = static_cast (actual); + + tree deref_rvalue = indirect_expression (rvalue, rvalue_locus); + tree coerced + = coercion_site1 (deref_rvalue, act->get_base (), exp->get_base (), + lvalue_locus, rvalue_locus); + if (exp->is_dyn_object () && SLICE_TYPE_P (TREE_TYPE (coerced))) + return coerced; + + return address_expression (coerced, rvalue_locus); + } + else if (expected->get_kind () == TyTy::TypeKind::POINTER) + { + // this is a dyn object + if (SLICE_TYPE_P (TREE_TYPE (rvalue))) + { + return rvalue; + } + + // bad coercion... of something to a reference + bool valid_coercion = actual->get_kind () == TyTy::TypeKind::REF + || actual->get_kind () == TyTy::TypeKind::POINTER; + if (!valid_coercion) + return error_mark_node; + + const TyTy::ReferenceType *exp + = static_cast (expected); + + TyTy::BaseType *actual_base = nullptr; + if (actual->get_kind () == TyTy::TypeKind::REF) + { + const TyTy::ReferenceType *act + = static_cast (actual); + + actual_base = act->get_base (); + } + else if (actual->get_kind () == TyTy::TypeKind::POINTER) + { + const TyTy::PointerType *act + = static_cast (actual); + + actual_base = act->get_base (); + } + rust_assert (actual_base != nullptr); + + tree deref_rvalue = indirect_expression (rvalue, rvalue_locus); + tree coerced + = coercion_site1 (deref_rvalue, actual_base, exp->get_base (), + lvalue_locus, rvalue_locus); + + if (exp->is_dyn_object () && SLICE_TYPE_P (TREE_TYPE (coerced))) + return coerced; + + return address_expression (coerced, rvalue_locus); + } + else if (expected->get_kind () == TyTy::TypeKind::ARRAY) + { + if (actual->get_kind () != TyTy::TypeKind::ARRAY) + return error_mark_node; + + tree tree_rval_type = TyTyResolveCompile::compile (ctx, actual); + tree tree_lval_type = TyTyResolveCompile::compile (ctx, expected); + if 
(!verify_array_capacities (tree_lval_type, tree_rval_type, + lvalue_locus, rvalue_locus)) + return error_mark_node; + } + else if (expected->get_kind () == TyTy::TypeKind::SLICE) + { + // bad coercion + bool valid_coercion = actual->get_kind () == TyTy::TypeKind::SLICE + || actual->get_kind () == TyTy::TypeKind::ARRAY; + if (!valid_coercion) + return error_mark_node; + + // nothing to do here + if (actual->get_kind () == TyTy::TypeKind::SLICE) + return rvalue; + + // return an unsized coercion + Resolver::Adjustment unsize_adj ( + Resolver::Adjustment::AdjustmentType::UNSIZE, actual, expected); + return resolve_unsized_adjustment (unsize_adj, rvalue, rvalue_locus); + } + + return rvalue; +} + +tree +HIRCompileBase::coerce_to_dyn_object (tree compiled_ref, + const TyTy::BaseType *actual, + const TyTy::DynamicObjectType *ty, + Location locus) +{ + tree dynamic_object = TyTyResolveCompile::compile (ctx, ty); + tree dynamic_object_fields = TYPE_FIELDS (dynamic_object); + tree vtable_field = DECL_CHAIN (dynamic_object_fields); + rust_assert (TREE_CODE (TREE_TYPE (vtable_field)) == ARRAY_TYPE); + + //' this assumes ordering and current the structure is + // __trait_object_ptr + // [list of function ptrs] + + std::vector> + probed_bounds_for_receiver = Resolver::TypeBoundsProbe::Probe (actual); + + tree address_of_compiled_ref = null_pointer_node; + if (!actual->is_unit ()) + address_of_compiled_ref = address_expression (compiled_ref, locus); + + std::vector vtable_ctor_elems; + std::vector vtable_ctor_idx; + unsigned long i = 0; + for (auto &bound : ty->get_object_items ()) + { + const Resolver::TraitItemReference *item = bound.first; + const TyTy::TypeBoundPredicate *predicate = bound.second; + + auto address = compute_address_for_trait_item (item, predicate, + probed_bounds_for_receiver, + actual, actual, locus); + vtable_ctor_elems.push_back (address); + vtable_ctor_idx.push_back (i++); + } + + tree vtable_ctor = ctx->get_backend ()->array_constructor_expression ( + 
TREE_TYPE (vtable_field), vtable_ctor_idx, vtable_ctor_elems, locus); + + std::vector dyn_ctor = {address_of_compiled_ref, vtable_ctor}; + return ctx->get_backend ()->constructor_expression (dynamic_object, false, + dyn_ctor, -1, locus); +} + +tree +HIRCompileBase::compute_address_for_trait_item ( + const Resolver::TraitItemReference *ref, + const TyTy::TypeBoundPredicate *predicate, + std::vector> + &receiver_bounds, + const TyTy::BaseType *receiver, const TyTy::BaseType *root, Location locus) +{ + // There are two cases here one where its an item which has an implementation + // within a trait-impl-block. Then there is the case where there is a default + // implementation for this within the trait. + // + // The awkward part here is that this might be a generic trait and we need to + // figure out the correct monomorphized type for this so we can resolve the + // address of the function , this is stored as part of the + // type-bound-predicate + // + // Algo: + // check if there is an impl-item for this trait-item-ref first + // else assert that the trait-item-ref has an implementation + + TyTy::TypeBoundPredicateItem predicate_item + = predicate->lookup_associated_item (ref->get_identifier ()); + rust_assert (!predicate_item.is_error ()); + + // this is the expected end type + TyTy::BaseType *trait_item_type = predicate_item.get_tyty_for_receiver (root); + rust_assert (trait_item_type->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *trait_item_fntype + = static_cast (trait_item_type); + + // find impl-block for this trait-item-ref + HIR::ImplBlock *associated_impl_block = nullptr; + const Resolver::TraitReference *predicate_trait_ref = predicate->get (); + for (auto &item : receiver_bounds) + { + Resolver::TraitReference *trait_ref = item.first; + HIR::ImplBlock *impl_block = item.second; + if (predicate_trait_ref->is_equal (*trait_ref)) + { + associated_impl_block = impl_block; + break; + } + } + + // FIXME this probably should just return 
error_mark_node but this helps + // debug for now since we are wrongly returning early on type-resolution + // failures, until we take advantage of more error types and error_mark_node + rust_assert (associated_impl_block != nullptr); + + // lookup self for the associated impl + std::unique_ptr &self_type_path + = associated_impl_block->get_type (); + TyTy::BaseType *self = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + self_type_path->get_mappings ().get_hirid (), &self); + rust_assert (ok); + + // lookup the predicate item from the self + TyTy::TypeBoundPredicate *self_bound = nullptr; + for (auto &bound : self->get_specified_bounds ()) + { + const Resolver::TraitReference *bound_ref = bound.get (); + const Resolver::TraitReference *specified_ref = predicate->get (); + if (bound_ref->is_equal (*specified_ref)) + { + self_bound = &bound; + break; + } + } + rust_assert (self_bound != nullptr); + + // lookup the associated item from the associated impl block + TyTy::TypeBoundPredicateItem associated_self_item + = self_bound->lookup_associated_item (ref->get_identifier ()); + rust_assert (!associated_self_item.is_error ()); + + TyTy::BaseType *mono1 = associated_self_item.get_tyty_for_receiver (self); + rust_assert (mono1 != nullptr); + rust_assert (mono1->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *assocated_item_ty1 = static_cast (mono1); + + // Lookup the impl-block for the associated impl_item if it exists + HIR::Function *associated_function = nullptr; + for (auto &impl_item : associated_impl_block->get_impl_items ()) + { + bool is_function = impl_item->get_impl_item_type () + == HIR::ImplItem::ImplItemType::FUNCTION; + if (!is_function) + continue; + + HIR::Function *fn = static_cast (impl_item.get ()); + bool found_associated_item + = fn->get_function_name ().compare (ref->get_identifier ()) == 0; + if (found_associated_item) + associated_function = fn; + } + + // we found an impl_item for this + if (associated_function != nullptr) + { + // 
lookup the associated type for this item + TyTy::BaseType *lookup = nullptr; + bool ok = ctx->get_tyctx ()->lookup_type ( + associated_function->get_mappings ().get_hirid (), &lookup); + rust_assert (ok); + rust_assert (lookup->get_kind () == TyTy::TypeKind::FNDEF); + TyTy::FnType *lookup_fntype = static_cast (lookup); + + if (lookup_fntype->needs_substitution ()) + { + TyTy::SubstitutionArgumentMappings mappings + = assocated_item_ty1->solve_missing_mappings_from_this ( + *trait_item_fntype, *lookup_fntype); + lookup_fntype = lookup_fntype->handle_substitions (mappings); + } + + return CompileInherentImplItem::Compile (associated_function, ctx, + lookup_fntype, true, locus); + } + + // we can only compile trait-items with a body + bool trait_item_has_definition = ref->is_optional (); + rust_assert (trait_item_has_definition); + + HIR::TraitItem *trait_item = ref->get_hir_trait_item (); + return CompileTraitItem::Compile (trait_item, ctx, trait_item_fntype, true, + locus); +} + +bool +HIRCompileBase::verify_array_capacities (tree ltype, tree rtype, + Location lvalue_locus, + Location rvalue_locus) +{ + rust_assert (ltype != NULL_TREE); + rust_assert (rtype != NULL_TREE); + + // lets just return ok as other errors have already occurred + if (ltype == error_mark_node || rtype == error_mark_node) + return true; + + tree ltype_domain = TYPE_DOMAIN (ltype); + if (!ltype_domain) + return false; + + if (!TREE_CONSTANT (TYPE_MAX_VALUE (ltype_domain))) + return false; + + unsigned HOST_WIDE_INT ltype_length + = wi::ext (wi::to_offset (TYPE_MAX_VALUE (ltype_domain)) + - wi::to_offset (TYPE_MIN_VALUE (ltype_domain)) + 1, + TYPE_PRECISION (TREE_TYPE (ltype_domain)), + TYPE_SIGN (TREE_TYPE (ltype_domain))) + .to_uhwi (); + + tree rtype_domain = TYPE_DOMAIN (rtype); + if (!rtype_domain) + return false; + + if (!TREE_CONSTANT (TYPE_MAX_VALUE (rtype_domain))) + return false; + + unsigned HOST_WIDE_INT rtype_length + = wi::ext (wi::to_offset (TYPE_MAX_VALUE (rtype_domain)) + - 
wi::to_offset (TYPE_MIN_VALUE (rtype_domain)) + 1, + TYPE_PRECISION (TREE_TYPE (rtype_domain)), + TYPE_SIGN (TREE_TYPE (rtype_domain))) + .to_uhwi (); + + if (ltype_length != rtype_length) + { + rust_error_at ( + rvalue_locus, + "expected an array with a fixed size of " HOST_WIDE_INT_PRINT_UNSIGNED + " elements, found one with " HOST_WIDE_INT_PRINT_UNSIGNED " elements", + ltype_length, rtype_length); + return false; + } + + return true; +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-compile.h b/gcc/rust/backend/rust-compile.h new file mode 100644 index 00000000000..62ebac69cc1 --- /dev/null +++ b/gcc/rust/backend/rust-compile.h @@ -0,0 +1,47 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_COMPILE_H +#define RUST_COMPILE_H + +#include "rust-system.h" +#include "rust-hir-full.h" +#include "rust-compile-context.h" + +namespace Rust { +namespace Compile { + +class CompileCrate +{ +public: + static void Compile (HIR::Crate &crate, Context *ctx); + + ~CompileCrate (); + +private: + CompileCrate (HIR::Crate &crate, Context *ctx); + void go (); + + HIR::Crate &crate; + Context *ctx; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_COMPILE_H diff --git a/gcc/rust/backend/rust-constexpr.cc b/gcc/rust/backend/rust-constexpr.cc new file mode 100644 index 00000000000..53c6ef6a668 --- /dev/null +++ b/gcc/rust/backend/rust-constexpr.cc @@ -0,0 +1,441 @@ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#include "rust-constexpr.h" +#include "rust-location.h" +#include "rust-diagnostics.h" +#include "rust-tree.h" + +#include "fold-const.h" +#include "realmpfr.h" +#include "convert.h" +#include "print-tree.h" +#include "gimplify.h" +#include "tree-iterator.h" + +namespace Rust { +namespace Compile { + +struct constexpr_global_ctx +{ + HOST_WIDE_INT constexpr_ops_count; + + constexpr_global_ctx () : constexpr_ops_count (0) {} +}; + +struct constexpr_ctx +{ + constexpr_global_ctx *global; +}; + +static tree +constant_value_1 (tree decl, bool strict_p, bool return_aggregate_cst_ok_p, + bool unshare_p); +tree +decl_constant_value (tree decl, bool unshare_p); + +static void +non_const_var_error (location_t loc, tree r); + +static tree +constexpr_expression (const constexpr_ctx *ctx, tree); + +static tree +constexpr_fn_retval (const constexpr_ctx *ctx, tree r); + +static tree +eval_store_expression (const constexpr_ctx *ctx, tree r); + +static tree +eval_call_expression (const constexpr_ctx *ctx, tree r); + +static tree +eval_binary_expression (const constexpr_ctx *ctx, tree r); + +static tree +get_function_named_in_call (tree t); + +tree +fold_expr (tree expr) +{ + constexpr_global_ctx global_ctx; + constexpr_ctx ctx = {&global_ctx}; + + tree folded = constexpr_expression (&ctx, expr); + rust_assert (folded != NULL_TREE); + return folded; +} + +static tree +constexpr_expression (const constexpr_ctx *ctx, tree t) +{ + location_t loc = EXPR_LOCATION (t); + + if (CONSTANT_CLASS_P (t)) + { + if (TREE_OVERFLOW (t)) + { + error_at (loc, "overflow in constant expression"); + return t; + } + + return t; + } + + // Avoid excessively long constexpr evaluations + if (++ctx->global->constexpr_ops_count >= constexpr_ops_limit) + { + rust_error_at ( + Location (loc), + "% evaluation operation count exceeds limit of " + "%wd (use %<-fconstexpr-ops-limit=%> to increase the limit)", + constexpr_ops_limit); + + return t; + } + + tree r = t; + tree_code tcode = TREE_CODE (t); + switch 
(tcode) + { + case CONST_DECL: { + r = decl_constant_value (t, /*unshare_p=*/false); + if (TREE_CODE (r) == TARGET_EXPR + && TREE_CODE (TARGET_EXPR_INITIAL (r)) == CONSTRUCTOR) + r = TARGET_EXPR_INITIAL (r); + if (DECL_P (r)) + { + non_const_var_error (loc, r); + return r; + } + } + break; + + case POINTER_PLUS_EXPR: + case POINTER_DIFF_EXPR: + case PLUS_EXPR: + case MINUS_EXPR: + case MULT_EXPR: + case TRUNC_DIV_EXPR: + case CEIL_DIV_EXPR: + case FLOOR_DIV_EXPR: + case ROUND_DIV_EXPR: + case TRUNC_MOD_EXPR: + case CEIL_MOD_EXPR: + case ROUND_MOD_EXPR: + case RDIV_EXPR: + case EXACT_DIV_EXPR: + case MIN_EXPR: + case MAX_EXPR: + case LSHIFT_EXPR: + case RSHIFT_EXPR: + case LROTATE_EXPR: + case RROTATE_EXPR: + case BIT_IOR_EXPR: + case BIT_XOR_EXPR: + case BIT_AND_EXPR: + case TRUTH_XOR_EXPR: + case LT_EXPR: + case LE_EXPR: + case GT_EXPR: + case GE_EXPR: + case EQ_EXPR: + case NE_EXPR: + case SPACESHIP_EXPR: + case UNORDERED_EXPR: + case ORDERED_EXPR: + case UNLT_EXPR: + case UNLE_EXPR: + case UNGT_EXPR: + case UNGE_EXPR: + case UNEQ_EXPR: + case LTGT_EXPR: + case RANGE_EXPR: + case COMPLEX_EXPR: + r = eval_binary_expression (ctx, t); + break; + + case CALL_EXPR: + r = eval_call_expression (ctx, t); + break; + + case RETURN_EXPR: + rust_assert (TREE_OPERAND (t, 0) != NULL_TREE); + r = constexpr_expression (ctx, TREE_OPERAND (t, 0)); + break; + + case MODIFY_EXPR: + r = eval_store_expression (ctx, t); + break; + + default: + break; + } + + return r; +} + +static tree +eval_store_expression (const constexpr_ctx *ctx, tree t) +{ + tree init = TREE_OPERAND (t, 1); + if (TREE_CLOBBER_P (init)) + /* Just ignore clobbers. */ + return void_node; + + /* First we figure out where we're storing to. 
*/ + tree target = TREE_OPERAND (t, 0); + + tree type = TREE_TYPE (target); + bool preeval = SCALAR_TYPE_P (type) || TREE_CODE (t) == MODIFY_EXPR; + if (preeval) + { + /* Evaluate the value to be stored without knowing what object it will be + stored in, so that any side-effects happen first. */ + init = fold_expr (init); + } + + bool evaluated = false; + tree object = NULL_TREE; + for (tree probe = target; object == NULL_TREE;) + { + switch (TREE_CODE (probe)) + { + default: + if (evaluated) + object = probe; + else + { + probe = constexpr_expression (ctx, probe); + evaluated = true; + } + break; + } + } + + return init; +} + +/* Subroutine of cxx_eval_constant_expression. + Like cxx_eval_unary_expression, except for binary expressions. */ +static tree +eval_binary_expression (const constexpr_ctx *ctx, tree t) +{ + tree orig_lhs = TREE_OPERAND (t, 0); + tree orig_rhs = TREE_OPERAND (t, 1); + tree lhs, rhs; + + lhs = constexpr_expression (ctx, orig_lhs); + rhs = constexpr_expression (ctx, orig_rhs); + + location_t loc = EXPR_LOCATION (t); + enum tree_code code = TREE_CODE (t); + tree type = TREE_TYPE (t); + + return fold_binary_loc (loc, code, type, lhs, rhs); +} + +// Subroutine of cxx_eval_constant_expression. +// Evaluate the call expression tree T in the context of OLD_CALL expression +// evaluation. +static tree +eval_call_expression (const constexpr_ctx *ctx, tree t) +{ + tree fun = get_function_named_in_call (t); + return constexpr_fn_retval (ctx, DECL_SAVED_TREE (fun)); +} + +// Subroutine of check_constexpr_fundef. BODY is the body of a function +// declared to be constexpr, or a sub-statement thereof. Returns the +// return value if suitable, error_mark_node for a statement not allowed in +// a constexpr function, or NULL_TREE if no return value was found. 
+static tree +constexpr_fn_retval (const constexpr_ctx *ctx, tree body) +{ + switch (TREE_CODE (body)) + { + case STATEMENT_LIST: { + tree expr = NULL_TREE; + for (tree stmt : tsi_range (body)) + { + tree s = constexpr_fn_retval (ctx, stmt); + if (s == error_mark_node) + return error_mark_node; + else if (s == NULL_TREE) + /* Keep iterating. */; + else if (expr) + /* Multiple return statements. */ + return error_mark_node; + else + expr = s; + } + return expr; + } + + case RETURN_EXPR: + return constexpr_expression (ctx, body); + + case DECL_EXPR: { + tree decl = DECL_EXPR_DECL (body); + if (TREE_CODE (decl) == USING_DECL + /* Accept __func__, __FUNCTION__, and __PRETTY_FUNCTION__. */ + || DECL_ARTIFICIAL (decl)) + return NULL_TREE; + return error_mark_node; + } + + case CLEANUP_POINT_EXPR: + return constexpr_fn_retval (ctx, TREE_OPERAND (body, 0)); + + case BIND_EXPR: { + tree b = BIND_EXPR_BODY (body); + return constexpr_fn_retval (ctx, b); + } + break; + + default: + return error_mark_node; + } + return error_mark_node; +} + +// Taken from cp/constexpr.cc +// +// If DECL is a scalar enumeration constant or variable with a +// constant initializer, return the initializer (or, its initializers, +// recursively); otherwise, return DECL. If STRICT_P, the +// initializer is only returned if DECL is a +// constant-expression. If RETURN_AGGREGATE_CST_OK_P, it is ok to +// return an aggregate constant. If UNSHARE_P, return an unshared +// copy of the initializer. +static tree +constant_value_1 (tree decl, bool strict_p, bool return_aggregate_cst_ok_p, + bool unshare_p) +{ + while (TREE_CODE (decl) == CONST_DECL) + { + tree init; + /* If DECL is a static data member in a template + specialization, we must instantiate it here. The + initializer for the static data member is not processed + until needed; we need it now. 
*/ + + init = DECL_INITIAL (decl); + if (init == error_mark_node) + { + if (TREE_CODE (decl) == CONST_DECL) + /* Treat the error as a constant to avoid cascading errors on + excessively recursive template instantiation (c++/9335). */ + return init; + else + return decl; + } + + decl = init; + } + return unshare_p ? unshare_expr (decl) : decl; +} + +// A more relaxed version of decl_really_constant_value, used by the +// common C/C++ code. +tree +decl_constant_value (tree decl, bool unshare_p) +{ + return constant_value_1 (decl, /*strict_p=*/false, + /*return_aggregate_cst_ok_p=*/true, + /*unshare_p=*/unshare_p); +} + +static void +non_const_var_error (location_t loc, tree r) +{ + error_at (loc, + "the value of %qD is not usable in a constant " + "expression", + r); + /* Avoid error cascade. */ + if (DECL_INITIAL (r) == error_mark_node) + return; + + // more in cp/constexpr.cc +} + +static tree +get_callee (tree call) +{ + if (call == NULL_TREE) + return call; + else if (TREE_CODE (call) == CALL_EXPR) + return CALL_EXPR_FN (call); + + return NULL_TREE; +} + +// We have an expression tree T that represents a call, either CALL_EXPR +// or AGGR_INIT_EXPR. If the call is lexically to a named function, +// return the _DECL for that function. +static tree +get_function_named_in_call (tree t) +{ + tree fun = get_callee (t); + if (fun && TREE_CODE (fun) == ADDR_EXPR + && TREE_CODE (TREE_OPERAND (fun, 0)) == FUNCTION_DECL) + fun = TREE_OPERAND (fun, 0); + return fun; +} + +// forked from gcc/cp/constexpr.cc maybe_constexpr_fn + +/* True if a function might be declared constexpr */ + +bool +maybe_constexpr_fn (tree t) +{ + return (DECL_DECLARED_CONSTEXPR_P (t)); +} + +// forked from gcc/cp/constexpr.cc get_nth_callarg + +/* We have an expression tree T that represents a call, either CALL_EXPR. + Return the Nth argument. 
*/ + +inline tree +get_nth_callarg (tree t, int n) +{ + switch (TREE_CODE (t)) + { + case CALL_EXPR: + return CALL_EXPR_ARG (t, n); + + default: + gcc_unreachable (); + return NULL; + } +} + +// forked from gcc/cp/constexpr.cc var_in_maybe_constexpr_fn + +/* True if T was declared in a function that might be constexpr: either a + function that was declared constexpr. */ + +bool +var_in_maybe_constexpr_fn (tree t) +{ + return (DECL_FUNCTION_SCOPE_P (t) && maybe_constexpr_fn (DECL_CONTEXT (t))); +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-constexpr.h b/gcc/rust/backend/rust-constexpr.h new file mode 100644 index 00000000000..3cfcec817a9 --- /dev/null +++ b/gcc/rust/backend/rust-constexpr.h @@ -0,0 +1,31 @@ +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . 
+ +#ifndef RUST_CONSTEXPR +#define RUST_CONSTEXPR + +#include "rust-system.h" +#include "tree.h" + +namespace Rust { +namespace Compile { + +extern tree fold_expr (tree); + +} // namespace Compile +} // namespace Rust + +#endif // RUST_CONSTEXPR diff --git a/gcc/rust/backend/rust-mangle.cc b/gcc/rust/backend/rust-mangle.cc new file mode 100644 index 00000000000..4d202078a70 --- /dev/null +++ b/gcc/rust/backend/rust-mangle.cc @@ -0,0 +1,307 @@ +#include "rust-mangle.h" +#include "fnv-hash.h" +#include "rust-base62.h" + +// FIXME: Rename those to legacy_* +static const std::string kMangledSymbolPrefix = "_ZN"; +static const std::string kMangledSymbolDelim = "E"; +static const std::string kMangledGenericDelim = "$C$"; +static const std::string kMangledSubstBegin = "$LT$"; +static const std::string kMangledSubstEnd = "$GT$"; +static const std::string kMangledSpace = "$u20$"; +static const std::string kMangledRef = "$RF$"; +static const std::string kMangledPtr = "$BP$"; +static const std::string kMangledLeftSqParen = "$u5b$"; // [ +static const std::string kMangledRightSqParen = "$u5d$"; // ] +static const std::string kQualPathBegin = "_" + kMangledSubstBegin; +static const std::string kMangledComma = "$C$"; + +namespace Rust { +namespace Compile { + +Mangler::MangleVersion Mangler::version = MangleVersion::LEGACY; + +static std::string +legacy_mangle_name (const std::string &name) +{ + // example + // <&T as core::fmt::Debug>::fmt: + // _ZN42_$LT$$RF$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h6dac924c0051eef7E + // replace all white space with $ and & with RF + // + // ::fooA: + // _ZN43_$LT$example..Bar$u20$as$u20$example..A$GT$4fooA17hfc615fa76c7db7a0E: + // + // core::ptr::const_ptr::::cast: + // _ZN4core3ptr9const_ptr33_$LT$impl$u20$$BP$const$u20$T$GT$4cast17hb79f4617226f1d55E: + // + // core::ptr::const_ptr::::as_ptr: + // _ZN4core3ptr9const_ptr43_$LT$impl$u20$$BP$const$u20$$u5b$T$u5d$$GT$6as_ptr17he16e0dcd9473b04fE: + // + // example::Foo::new: + // 
_ZN7example12Foo$LT$T$GT$3new17h9a2aacb7fd783515E: + // + // >::call + // _ZN74_$LT$example..Identity$u20$as$u20$example..FnLike$LT$$RF$T$C$$RF$T$GT$$GT$4call17ha9ee58935895acb3E + + std::string buffer; + for (size_t i = 0; i < name.size (); i++) + { + std::string m; + char c = name.at (i); + + if (c == ' ') + m = kMangledSpace; + else if (c == '&') + m = kMangledRef; + else if (i == 0 && c == '<') + m = kQualPathBegin; + else if (c == '<') + m = kMangledSubstBegin; + else if (c == '>') + m = kMangledSubstEnd; + else if (c == '*') + m = kMangledPtr; + else if (c == '[') + m = kMangledLeftSqParen; + else if (c == ']') + m = kMangledRightSqParen; + else if (c == ',') + m = kMangledComma; + else if (c == ':') + { + rust_assert (i + 1 < name.size ()); + rust_assert (name.at (i + 1) == ':'); + i++; + m = ".."; + } + else + m.push_back (c); + + buffer += m; + } + + return std::to_string (buffer.size ()) + buffer; +} + +static std::string +legacy_mangle_canonical_path (const Resolver::CanonicalPath &path) +{ + std::string buffer; + for (size_t i = 0; i < path.size (); i++) + { + auto &seg = path.get_seg_at (i); + buffer += legacy_mangle_name (seg.second); + } + return buffer; +} + +// rustc uses a sip128 hash for legacy mangling, but an fnv 128 was quicker to +// implement for now +static std::string +legacy_hash (const std::string &fingerprint) +{ + Hash::FNV128 hasher; + hasher.write ((const unsigned char *) fingerprint.c_str (), + fingerprint.size ()); + + uint64_t hi, lo; + hasher.sum (&hi, &lo); + + char hex[16 + 1]; + memset (hex, 0, sizeof hex); + snprintf (hex, sizeof hex, "%08" PRIx64 "%08" PRIx64, lo, hi); + + return "h" + std::string (hex, sizeof (hex) - 1); +} + +static std::string +v0_tuple_prefix (const TyTy::BaseType *ty) +{ + if (ty->is_unit ()) + return "u"; + + // FIXME: ARTHUR: Add rest of algorithm + return ""; +} + +static std::string +v0_numeric_prefix (const TyTy::BaseType *ty) +{ + static const std::map num_prefixes = { + {"[i8]", "a"}, {"[u8]", 
"h"}, {"[i16]", "s"}, {"[u16]", "t"}, + {"[i32]", "l"}, {"[u32]", "m"}, {"[i64]", "x"}, {"[u64]", "y"}, + {"[isize]", "i"}, {"[usize]", "j"}, {"[f32]", "f"}, {"[f64]", "d"}, + }; + + auto ty_kind = ty->get_kind (); + auto ty_str = ty->as_string (); + auto numeric_iter = num_prefixes.end (); + + // Special numeric types + if (ty_kind == TyTy::TypeKind::ISIZE) + return "i"; + else if (ty_kind == TyTy::TypeKind::USIZE) + return "j"; + + numeric_iter = num_prefixes.find (ty_str); + if (numeric_iter != num_prefixes.end ()) + return numeric_iter->second; + + return ""; +} + +static std::string +v0_simple_type_prefix (const TyTy::BaseType *ty) +{ + switch (ty->get_kind ()) + { + case TyTy::TypeKind::BOOL: + return "b"; + case TyTy::TypeKind::CHAR: + return "c"; + case TyTy::TypeKind::STR: + return "e"; + case TyTy::TypeKind::NEVER: + return "z"; + + // Placeholder types + case TyTy::TypeKind::ERROR: // Fallthrough + case TyTy::TypeKind::INFER: // Fallthrough + case TyTy::TypeKind::PLACEHOLDER: // Fallthrough + case TyTy::TypeKind::PARAM: + // FIXME: TyTy::TypeKind::BOUND is also a valid variant in rustc + return "p"; + + case TyTy::TypeKind::TUPLE: + return v0_tuple_prefix (ty); + + case TyTy::TypeKind::UINT: // Fallthrough + case TyTy::TypeKind::INT: // Fallthrough + case TyTy::TypeKind::FLOAT: // Fallthrough + case TyTy::TypeKind::ISIZE: // Fallthrough + case TyTy::TypeKind::USIZE: // Fallthrough + return v0_numeric_prefix (ty); + + default: + return ""; + } + + gcc_unreachable (); +} + +// Add an underscore-terminated base62 integer to the mangling string. 
+// This corresponds to the `` grammar in the v0 mangling RFC: +// - 0 is encoded as "_" +// - any other value is encoded as itself minus one in base 62, followed by +// "_" +static void +v0_add_integer_62 (std::string &mangled, uint64_t x) +{ + if (x > 0) + mangled.append (base62_integer (x - 1)); + + mangled.append ("_"); +} + +// Add a tag-prefixed base62 integer to the mangling string when the +// integer is greater than 0: +// - 0 is encoded as "" (nothing) +// - any other value is encoded as + v0_add_integer_62(itself), that is +// + base62(itself - 1) + '_' +static void +v0_add_opt_integer_62 (std::string &mangled, std::string tag, uint64_t x) +{ + if (x > 0) + { + mangled.append (tag); + v0_add_integer_62 (mangled, x); + } +} + +static void +v0_add_disambiguator (std::string &mangled, uint64_t dis) +{ + v0_add_opt_integer_62 (mangled, "s", dis); +} + +// Add an identifier to the mangled string. This corresponds to the +// `` grammar in the v0 mangling RFC. +static void +v0_add_identifier (std::string &mangled, const std::string &identifier) +{ + // FIXME: gccrs cannot handle unicode identifiers yet, so we never have to + // create mangling for unicode values for now. However, this is handled + // by the v0 mangling scheme. The grammar for unicode identifier is + // contained in , right under the + // one. If the identifier contains unicode values, then an extra "u" needs + // to be added to the mangling string and `punycode` must be used to encode + // the characters. 
+ + mangled += std::to_string (identifier.size ()); + + // If the first character of the identifier is a digit or an underscore, we + // add an extra underscore + if (identifier[0] == '_') + mangled.append ("_"); + + mangled.append (identifier); +} + +static std::string +v0_type_prefix (const TyTy::BaseType *ty) +{ + auto ty_prefix = v0_simple_type_prefix (ty); + if (!ty_prefix.empty ()) + return ty_prefix; + + // FIXME: We need to fetch more type prefixes + gcc_unreachable (); +} + +static std::string +legacy_mangle_item (const TyTy::BaseType *ty, + const Resolver::CanonicalPath &path) +{ + const std::string hash = legacy_hash (ty->as_string ()); + const std::string hash_sig = legacy_mangle_name (hash); + + return kMangledSymbolPrefix + legacy_mangle_canonical_path (path) + hash_sig + + kMangledSymbolDelim; +} + +static std::string +v0_mangle_item (const TyTy::BaseType *ty, const Resolver::CanonicalPath &path) +{ + // we can get this from the canonical_path + auto mappings = Analysis::Mappings::get (); + std::string crate_name; + bool ok = mappings->get_crate_name (path.get_crate_num (), crate_name); + rust_assert (ok); + + std::string mangled; + // FIXME: Add real algorithm once all pieces are implemented + auto ty_prefix = v0_type_prefix (ty); + v0_add_identifier (mangled, crate_name); + v0_add_disambiguator (mangled, 62); + + gcc_unreachable (); +} + +std::string +Mangler::mangle_item (const TyTy::BaseType *ty, + const Resolver::CanonicalPath &path) const +{ + switch (version) + { + case Mangler::MangleVersion::LEGACY: + return legacy_mangle_item (ty, path); + case Mangler::MangleVersion::V0: + return v0_mangle_item (ty, path); + default: + gcc_unreachable (); + } +} + +} // namespace Compile +} // namespace Rust diff --git a/gcc/rust/backend/rust-mangle.h b/gcc/rust/backend/rust-mangle.h new file mode 100644 index 00000000000..6d5a64f8bce --- /dev/null +++ b/gcc/rust/backend/rust-mangle.h @@ -0,0 +1,52 @@ +// This file is part of GCC. 
+ +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_MANGLE_H +#define RUST_MANGLE_H + +#include "rust-system.h" +#include "rust-tyty.h" + +namespace Rust { +namespace Compile { + +class Mangler +{ +public: + enum MangleVersion + { + // Values defined in rust/lang.opt + LEGACY = 0, + V0 = 1, + }; + + // this needs to support Legacy and V0 see github #429 or #305 + std::string mangle_item (const TyTy::BaseType *ty, + const Resolver::CanonicalPath &path) const; + + static void set_mangling (int frust_mangling_value) + { + version = static_cast (frust_mangling_value); + } + +private: + static enum MangleVersion version; +}; + +} // namespace Compile +} // namespace Rust + +#endif // RUST_MANGLE_H diff --git a/gcc/rust/backend/rust-tree.cc b/gcc/rust/backend/rust-tree.cc new file mode 100644 index 00000000000..3d71e19fe82 --- /dev/null +++ b/gcc/rust/backend/rust-tree.cc @@ -0,0 +1,958 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. 
// Mark EXPR (and the sub-expressions that carry its value) as used, keeping
// DECL_READ_P accurate for "set but not used" diagnostics, looking through
// implicit reference indirections and collapsing location wrappers.
//
//   RVALUE_P       - EXPR is used as an rvalue; if a location wrapper ends up
//                    wrapping a constant, re-code it as NON_LVALUE_EXPR.
//   READ_P         - also mark EXPR as read via mark_exp_read.
//   LOC            - location to re-attach when a location wrapper is dropped.
//   REJECT_BUILTIN - NOTE(review): this fork returns error_mark_node for ANY
//                    expression when REJECT_BUILTIN is true, while the
//                    cp/expr.cc function this appears to be forked from only
//                    rejects uses of built-in *functions* -- confirm this is
//                    intentional, since mark_rvalue_use defaults it to true.
tree
mark_use (tree expr, bool rvalue_p, bool read_p,
          location_t loc /* = UNKNOWN_LOCATION */,
          bool reject_builtin /* = true */)
{
  // Recurse with the same flags and location.
#define RECUR(t) mark_use ((t), rvalue_p, read_p, loc, reject_builtin)

  if (expr == NULL_TREE || error_operand_p (expr))
    return expr;

  if (reject_builtin)
    return error_mark_node;

  if (read_p)
    mark_exp_read (expr);

  // recurse_op[i] set means operand i of EXPR is itself a use to be marked.
  bool recurse_op[3] = {false, false, false};
  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
    case NON_DEPENDENT_EXPR:
      recurse_op[0] = true;
      break;
    case COMPOUND_EXPR:
      // Only the second operand carries the value of the comma expression.
      recurse_op[1] = true;
      break;
    case COND_EXPR:
      recurse_op[2] = true;
      if (TREE_OPERAND (expr, 1))
        recurse_op[1] = true;
      break;
    case INDIRECT_REF:
      if (REFERENCE_REF_P (expr))
        {
          /* Try to look through the reference.  */
          tree ref = TREE_OPERAND (expr, 0);
          tree r = mark_rvalue_use (ref, loc, reject_builtin);
          if (r != ref)
            expr = convert_from_reference (r);
        }
      break;

    case VIEW_CONVERT_EXPR:
      if (location_wrapper_p (expr))
        {
          loc = EXPR_LOCATION (expr);
          tree op = TREE_OPERAND (expr, 0);
          tree nop = RECUR (op);
          if (nop == error_mark_node)
            return error_mark_node;
          else if (op == nop)
            /* No change.  */;
          else if (DECL_P (nop) || CONSTANT_CLASS_P (nop))
            {
              /* Reuse the location wrapper.  */
              TREE_OPERAND (expr, 0) = nop;
              /* If we're replacing a DECL with a constant, we also need to
                 change the TREE_CODE of the location wrapper.  */
              if (rvalue_p)
                TREE_SET_CODE (expr, NON_LVALUE_EXPR);
            }
          else
            {
              /* Drop the location wrapper.  */
              expr = nop;
              protected_set_expr_location (expr, loc);
            }
          return expr;
        }
      gcc_fallthrough ();
    CASE_CONVERT:
      recurse_op[0] = true;
      break;

    default:
      break;
    }

  // Rewrite the flagged operands in place with their marked forms.
  for (int i = 0; i < 3; ++i)
    if (recurse_op[i])
      {
        tree op = TREE_OPERAND (expr, i);
        op = RECUR (op);
        if (op == error_mark_node)
          return error_mark_node;
        TREE_OPERAND (expr, i) = op;
      }

  return expr;
#undef RECUR
}
(expr, 2)); + gcc_fallthrough (); + case COMPOUND_EXPR: + TREE_OPERAND (expr, 1) = mark_discarded_use (TREE_OPERAND (expr, 1)); + return expr; + + case COMPONENT_REF: + case ARRAY_REF: + case INDIRECT_REF: + case MEMBER_REF: + break; + default: + if (DECL_P (expr)) + break; + else + return expr; + } + + return mark_use (expr, true, true, input_location, false); +} + +tree +convert_to_void (tree expr, impl_conv_void implicit) +{ + location_t loc = expr_loc_or_input_loc (expr); + if (expr == error_mark_node || TREE_TYPE (expr) == error_mark_node) + return error_mark_node; + + expr = mark_discarded_use (expr); + if (implicit == ICV_CAST) + /* An explicit cast to void avoids all -Wunused-but-set* warnings. */ + mark_exp_read (expr); + + if (!TREE_TYPE (expr)) + return expr; + + if (VOID_TYPE_P (TREE_TYPE (expr))) + return expr; + switch (TREE_CODE (expr)) + { + case COND_EXPR: { + /* The two parts of a cond expr might be separate lvalues. */ + tree op1 = TREE_OPERAND (expr, 1); + tree op2 = TREE_OPERAND (expr, 2); + bool side_effects + = ((op1 && TREE_SIDE_EFFECTS (op1)) || TREE_SIDE_EFFECTS (op2)); + tree new_op1, new_op2; + new_op1 = NULL_TREE; + if (implicit != ICV_CAST && !side_effects) + { + if (op1) + new_op1 = convert_to_void (op1, ICV_SECOND_OF_COND); + new_op2 = convert_to_void (op2, ICV_THIRD_OF_COND); + } + else + { + if (op1) + new_op1 = convert_to_void (op1, ICV_CAST); + new_op2 = convert_to_void (op2, ICV_CAST); + } + + expr = build3_loc (loc, COND_EXPR, TREE_TYPE (new_op2), + TREE_OPERAND (expr, 0), new_op1, new_op2); + break; + } + + case COMPOUND_EXPR: { + /* The second part of a compound expr contains the value. */ + tree op1 = TREE_OPERAND (expr, 1); + tree new_op1; + if (implicit != ICV_CAST + && !warning_suppressed_p (expr /* What warning? 
*/)) + new_op1 = convert_to_void (op1, ICV_RIGHT_OF_COMMA); + else + new_op1 = convert_to_void (op1, ICV_CAST); + + if (new_op1 != op1) + { + tree t = build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (new_op1), + TREE_OPERAND (expr, 0), new_op1); + expr = t; + } + + break; + } + + case NON_LVALUE_EXPR: + case NOP_EXPR: + /* These have already decayed to rvalue. */ + break; + + case CALL_EXPR: + maybe_warn_nodiscard (expr, implicit); + break; + + case INDIRECT_REF: { + tree type = TREE_TYPE (expr); + int is_reference = TYPE_REF_P (TREE_TYPE (TREE_OPERAND (expr, 0))); + int is_volatile = TYPE_VOLATILE (type); + int is_complete = COMPLETE_TYPE_P (type); + + /* Can't load the value if we don't know the type. */ + if (is_volatile && !is_complete) + { + switch (implicit) + { + case ICV_CAST: + warning_at (loc, 0, + "conversion to void will not access " + "object of incomplete type %qT", + type); + break; + case ICV_SECOND_OF_COND: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in second operand " + "of conditional expression", + type); + break; + case ICV_THIRD_OF_COND: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in third operand " + "of conditional expression", + type); + break; + case ICV_RIGHT_OF_COMMA: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in right operand of " + "comma operator", + type); + break; + case ICV_LEFT_OF_COMMA: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in left operand of " + "comma operator", + type); + break; + case ICV_STATEMENT: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in statement", + type); + break; + case ICV_THIRD_IN_FOR: + warning_at (loc, 0, + "indirection will not access object of " + "incomplete type %qT in for increment " + "expression", + type); + break; + default: + gcc_unreachable (); + } + } + /* Don't load the value if 
this is an implicit dereference, or if + the type needs to be handled by ctors/dtors. */ + else if (is_volatile && is_reference) + { + switch (implicit) + { + case ICV_CAST: + warning_at (loc, 0, + "conversion to void will not access " + "object of type %qT", + type); + break; + case ICV_SECOND_OF_COND: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in second operand of " + "conditional expression", + type); + break; + case ICV_THIRD_OF_COND: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in third operand of " + "conditional expression", + type); + break; + case ICV_RIGHT_OF_COMMA: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in right operand of " + "comma operator", + type); + break; + case ICV_LEFT_OF_COMMA: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in left operand of comma " + "operator", + type); + break; + case ICV_STATEMENT: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in statement", + type); + break; + case ICV_THIRD_IN_FOR: + warning_at (loc, 0, + "implicit dereference will not access " + "object of type %qT in for increment expression", + type); + break; + default: + gcc_unreachable (); + } + } + else if (is_volatile && TREE_ADDRESSABLE (type)) + { + switch (implicit) + { + case ICV_CAST: + warning_at (loc, 0, + "conversion to void will not access " + "object of non-trivially-copyable type %qT", + type); + break; + case ICV_SECOND_OF_COND: + warning_at (loc, 0, + "indirection will not access object of " + "non-trivially-copyable type %qT in second " + "operand of conditional expression", + type); + break; + case ICV_THIRD_OF_COND: + warning_at (loc, 0, + "indirection will not access object of " + "non-trivially-copyable type %qT in third " + "operand of conditional expression", + type); + break; + case ICV_RIGHT_OF_COMMA: + warning_at (loc, 0, + 
"indirection will not access object of " + "non-trivially-copyable type %qT in right " + "operand of comma operator", + type); + break; + case ICV_LEFT_OF_COMMA: + warning_at (loc, 0, + "indirection will not access object of " + "non-trivially-copyable type %qT in left " + "operand of comma operator", + type); + break; + case ICV_STATEMENT: + warning_at (loc, 0, + "indirection will not access object of " + "non-trivially-copyable type %qT in statement", + type); + break; + case ICV_THIRD_IN_FOR: + warning_at (loc, 0, + "indirection will not access object of " + "non-trivially-copyable type %qT in for " + "increment expression", + type); + break; + default: + gcc_unreachable (); + } + } + if (is_reference || !is_volatile || !is_complete + || TREE_ADDRESSABLE (type)) + { + /* Emit a warning (if enabled) when the "effect-less" INDIRECT_REF + operation is stripped off. Note that we don't warn about + - an expression with TREE_NO_WARNING set. (For an example of + such expressions, see build_over_call in call.cc.) + - automatic dereferencing of references, since the user cannot + control it. (See also warn_if_unused_value() in c-common.cc.) + */ + if (warn_unused_value && implicit != ICV_CAST + && !warning_suppressed_p (expr, OPT_Wunused_value) + && !is_reference) + warning_at (loc, OPT_Wunused_value, "value computed is not used"); + expr = TREE_OPERAND (expr, 0); + if (TREE_CODE (expr) == CALL_EXPR) + maybe_warn_nodiscard (expr, implicit); + } + + break; + } + + case VAR_DECL: { + /* External variables might be incomplete. 
// Diagnose discarding the result of a "nodiscard" call.  EXPR is the call
// (possibly wrapped in a TARGET_EXPR); IMPLICIT describes the void context --
// an explicit cast to void (ICV_CAST) suppresses the warning.  The attribute
// is checked first on the called function, then on the call's return type;
// an optional attribute argument string is included in the message.
void
maybe_warn_nodiscard (tree expr, impl_conv_void implicit)
{
  tree call = expr;
  if (TREE_CODE (expr) == TARGET_EXPR)
    call = TARGET_EXPR_INITIAL (expr);

  location_t loc = expr_loc_or_input_loc (call);
  tree callee = CALL_EXPR_FN (call);
  if (!callee)
    return;

  // Strip pointer/reference-to-function to reach the FUNCTION_TYPE.
  tree type = TREE_TYPE (callee);
  if (INDIRECT_TYPE_P (type))
    type = TREE_TYPE (type);

  tree rettype = TREE_TYPE (type);
  tree fn = get_fndecl_from_callee (callee);
  tree attr;
  if (implicit != ICV_CAST && fn
      && (attr = lookup_attribute ("nodiscard", DECL_ATTRIBUTES (fn))))
    {
      // "nodiscard" on the function itself.
      escaped_string msg;
      tree args = TREE_VALUE (attr);
      if (args)
        msg.escape (TREE_STRING_POINTER (TREE_VALUE (args)));
      const char *format
        = (msg ? G_ ("ignoring return value of %qD, that must be used: %<%s%>")
               : G_ ("ignoring return value of %qD, that must be used"));
      const char *raw_msg = msg ? (const char *) msg : "";
      auto_diagnostic_group d;
      if (warning_at (loc, OPT_Wunused_result, format, fn, raw_msg))
        inform (DECL_SOURCE_LOCATION (fn), "declared here");
    }
  else if (implicit != ICV_CAST
           && (attr
               = lookup_attribute ("nodiscard", TYPE_ATTRIBUTES (rettype))))
    {
      // "nodiscard" on the returned type.
      escaped_string msg;
      tree args = TREE_VALUE (attr);
      if (args)
        msg.escape (TREE_STRING_POINTER (TREE_VALUE (args)));
      const char *format
        = (msg ? G_ (
             "ignoring returned value of type %qT, that must be used: %<%s%>")
               : G_ ("ignoring returned value of type %qT, that must be used"));
      const char *raw_msg = msg ? (const char *) msg : "";
      auto_diagnostic_group d;
      if (warning_at (loc, OPT_Wunused_result, format, rettype, raw_msg))
        {
          if (fn)
            inform (DECL_SOURCE_LOCATION (fn), "in call to %qD, declared here",
                    fn);
          inform (DECL_SOURCE_LOCATION (TYPE_NAME (rettype)),
                  "%qT declared here", rettype);
        }
    }
}
+tree +get_fndecl_from_callee (tree fn) +{ + if (fn == NULL_TREE) + return fn; + if (TREE_CODE (fn) == FUNCTION_DECL) + return fn; + tree type = TREE_TYPE (fn); + if (type == NULL_TREE || !INDIRECT_TYPE_P (type)) + return NULL_TREE; + + STRIP_NOPS (fn); + if (TREE_CODE (fn) == ADDR_EXPR || TREE_CODE (fn) == FDESC_EXPR) + fn = TREE_OPERAND (fn, 0); + if (TREE_CODE (fn) == FUNCTION_DECL) + return fn; + return NULL_TREE; +} + +tree +pointer_offset_expression (tree base_tree, tree index_tree, location_t location) +{ + tree element_type_tree = TREE_TYPE (TREE_TYPE (base_tree)); + if (base_tree == error_mark_node || TREE_TYPE (base_tree) == error_mark_node + || index_tree == error_mark_node || element_type_tree == error_mark_node) + return error_mark_node; + + tree element_size = TYPE_SIZE_UNIT (element_type_tree); + index_tree = fold_convert_loc (location, sizetype, index_tree); + tree offset + = fold_build2_loc (location, MULT_EXPR, sizetype, index_tree, element_size); + + return fold_build2_loc (location, POINTER_PLUS_EXPR, TREE_TYPE (base_tree), + base_tree, offset); +} + +// forked from gcc/cp/tree.cc cp_walk_subtrees +/* Apply FUNC to all language-specific sub-trees of TP in a pre-order + traversal. Called from walk_tree. */ + +tree +rs_walk_subtrees (tree *tp, int *walk_subtrees_p, walk_tree_fn func, void *data, + hash_set *pset) +{ + enum tree_code code = TREE_CODE (*tp); + tree result; + +#define WALK_SUBTREE(NODE) \ + do \ + { \ + result = rs_walk_tree (&(NODE), func, data, pset); \ + if (result) \ + goto out; \ + } \ + while (0) + + if (TYPE_P (*tp)) + { + /* If *WALK_SUBTREES_P is 1, we're interested in the syntactic form of + the argument, so don't look through typedefs, but do walk into + template arguments for alias templates (and non-typedefed classes). + + If *WALK_SUBTREES_P > 1, we're interested in type identity or + equivalence, so look through typedefs, ignoring template arguments for + alias templates, and walk into template args of classes. 
+ + See find_abi_tags_r for an example of setting *WALK_SUBTREES_P to 2 + when that's the behavior the walk_tree_fn wants. */ + if (*walk_subtrees_p == 1 && typedef_variant_p (*tp)) + { + *walk_subtrees_p = 0; + return NULL_TREE; + } + } + + /* Not one of the easy cases. We must explicitly go through the + children. */ + result = NULL_TREE; + switch (code) + { + case TREE_LIST: + WALK_SUBTREE (TREE_PURPOSE (*tp)); + break; + + case RECORD_TYPE: + if (TYPE_PTRMEMFUNC_P (*tp)) + WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE_RAW (*tp)); + break; + + case CONSTRUCTOR: + if (COMPOUND_LITERAL_P (*tp)) + WALK_SUBTREE (TREE_TYPE (*tp)); + break; + + case DECL_EXPR: + /* User variables should be mentioned in BIND_EXPR_VARS + and their initializers and sizes walked when walking + the containing BIND_EXPR. Compiler temporaries are + handled here. And also normal variables in templates, + since do_poplevel doesn't build a BIND_EXPR then. */ + if (VAR_P (TREE_OPERAND (*tp, 0)) + && (DECL_ARTIFICIAL (TREE_OPERAND (*tp, 0)) + && !TREE_STATIC (TREE_OPERAND (*tp, 0)))) + { + tree decl = TREE_OPERAND (*tp, 0); + WALK_SUBTREE (DECL_INITIAL (decl)); + WALK_SUBTREE (DECL_SIZE (decl)); + WALK_SUBTREE (DECL_SIZE_UNIT (decl)); + } + break; + + default: + return NULL_TREE; + } + + /* We didn't find what we were looking for. */ +out: + return result; + +#undef WALK_SUBTREE +} + +// forked from gcc/cp/tree.cc cp_expr_location + +/* Like EXPR_LOCATION, but also handle some tcc_exceptional that have + locations. */ + +location_t +rs_expr_location (const_tree t_) +{ + tree t = CONST_CAST_TREE (t_); + if (t == NULL_TREE) + return UNKNOWN_LOCATION; + + return EXPR_LOCATION (t); +} + +// forked from gcc/cp/class.cc is_really_empty_class + +/* Returns true if TYPE contains no actual data, just various + possible combinations of empty classes. If IGNORE_VPTR is true, + a vptr doesn't prevent the class from being considered empty. 
// forked from gcc/cp/class.cc is_really_empty_class

/* Returns true if TYPE contains no actual data, just various
   possible combinations of empty classes.  If IGNORE_VPTR is true,
   a vptr doesn't prevent the class from being considered empty.  Typically
   we want to ignore the vptr on assignment, and not on initialization.  */
bool
is_really_empty_class (tree type, bool ignore_vptr)
{
  if (CLASS_TYPE_P (type))
    {
      tree field;
      tree binfo;
      tree base_binfo;
      int i;

      /* CLASSTYPE_EMPTY_P isn't set properly until the class is actually laid
         out, but we'd like to be able to check this before then.  */
      if (COMPLETE_TYPE_P (type) && is_empty_class (type))
        return true;

      if (!ignore_vptr && TYPE_CONTAINS_VPTR_P (type))
        return false;

      // Every base must itself be really empty...
      for (binfo = TYPE_BINFO (type), i = 0;
           BINFO_BASE_ITERATE (binfo, i, base_binfo); ++i)
        if (!is_really_empty_class (BINFO_TYPE (base_binfo), ignore_vptr))
          return false;
      // ...and so must every non-artificial, named field.
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL
            && !DECL_ARTIFICIAL (field)
            /* An unnamed bit-field is not a data member.  */
            && !DECL_UNNAMED_BIT_FIELD (field)
            && !is_really_empty_class (TREE_TYPE (field), ignore_vptr))
          return false;
      return true;
    }
  else if (TREE_CODE (type) == ARRAY_TYPE)
    // A zero-length array, or an array of really-empty elements, is empty.
    return (integer_zerop (array_type_nelts_top (type))
            || is_really_empty_class (TREE_TYPE (type), ignore_vptr));
  return false;
}
// forked from gcc/cp/tree.cc builtin_valid_in_constant_expr_p

/* Test whether DECL is a builtin that may appear in a
   constant-expression.  */
bool
builtin_valid_in_constant_expr_p (const_tree decl)
{
  STRIP_ANY_LOCATION_WRAPPER (decl);
  if (TREE_CODE (decl) != FUNCTION_DECL)
    /* Not a function.  */
    return false;
  if (DECL_BUILT_IN_CLASS (decl) != BUILT_IN_NORMAL)
    {
      // Front-end builtins: only the introspection builtins listed below are
      // valid in constant expressions.
      if (fndecl_built_in_p (decl, BUILT_IN_FRONTEND))
        switch (DECL_FE_FUNCTION_CODE (decl))
          {
          case RS_BUILT_IN_IS_CONSTANT_EVALUATED:
          case RS_BUILT_IN_SOURCE_LOCATION:
          case RS_BUILT_IN_IS_CORRESPONDING_MEMBER:
          case RS_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS:
            return true;
          default:
            break;
          }
      /* Not a built-in.  */
      return false;
    }
  // Normal builtins: the case groups below deliberately fall through to the
  // shared "return true".
  switch (DECL_FUNCTION_CODE (decl))
    {
      /* These always have constant results like the corresponding
         macros/symbol.  */
    case BUILT_IN_FILE:
    case BUILT_IN_FUNCTION:
    case BUILT_IN_LINE:

      /* The following built-ins are valid in constant expressions
         when their arguments are.  */
    case BUILT_IN_ADD_OVERFLOW_P:
    case BUILT_IN_SUB_OVERFLOW_P:
    case BUILT_IN_MUL_OVERFLOW_P:

      /* These have constant results even if their operands are
         non-constant.  */
    case BUILT_IN_CONSTANT_P:
    case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
      return true;
    default:
      return false;
    }
}
*/ + return false; + else + return true; +} + +// forked from gcc/cp/typeck.cc cp_type_quals + +/* Returns the type qualifiers for this type, including the qualifiers on the + elements for an array type. */ + +int +rs_type_quals (const_tree type) +{ + int quals; + /* This CONST_CAST is okay because strip_array_types returns its + argument unmodified and we assign it to a const_tree. */ + type = strip_array_types (CONST_CAST_TREE (type)); + if (type == error_mark_node + /* Quals on a FUNCTION_TYPE are memfn quals. */ + || TREE_CODE (type) == FUNCTION_TYPE) + return TYPE_UNQUALIFIED; + quals = TYPE_QUALS (type); + /* METHOD and REFERENCE_TYPEs should never have quals. */ + gcc_assert ( + (TREE_CODE (type) != METHOD_TYPE && !TYPE_REF_P (type)) + || ((quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) == TYPE_UNQUALIFIED)); + return quals; +} + +} // namespace Rust diff --git a/gcc/rust/backend/rust-tree.h b/gcc/rust/backend/rust-tree.h new file mode 100644 index 00000000000..a667cbfc8ad --- /dev/null +++ b/gcc/rust/backend/rust-tree.h @@ -0,0 +1,508 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_TREE +#define RUST_TREE + +#include "rust-system.h" +#include "coretypes.h" +#include "tree.h" + +/* Returns true if NODE is a pointer. 
*/ +#define TYPE_PTR_P(NODE) (TREE_CODE (NODE) == POINTER_TYPE) + +/* Returns true if NODE is a reference. */ +#define TYPE_REF_P(NODE) (TREE_CODE (NODE) == REFERENCE_TYPE) + +/* Returns true if NODE is a pointer or a reference. */ +#define INDIRECT_TYPE_P(NODE) (TYPE_PTR_P (NODE) || TYPE_REF_P (NODE)) + +/* [basic.fundamental] + + Types bool, char, wchar_t, and the signed and unsigned integer types + are collectively called integral types. + + Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration + types as well, which is incorrect in C++. Keep these checks in + ascending code order. */ +#define RS_INTEGRAL_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == INTEGER_TYPE) + +/* [basic.fundamental] + + Integral and floating types are collectively called arithmetic + types. + + As a GNU extension, we also accept complex types. + + Keep these checks in ascending code order. */ +#define ARITHMETIC_TYPE_P(TYPE) \ + (RS_INTEGRAL_TYPE_P (TYPE) || TREE_CODE (TYPE) == REAL_TYPE \ + || TREE_CODE (TYPE) == COMPLEX_TYPE) + +/* True iff TYPE is cv decltype(nullptr). */ +#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) + +/* [basic.types] + + Arithmetic types, enumeration types, pointer types, + pointer-to-member types, and std::nullptr_t are collectively called + scalar types. + + Keep these checks in ascending code order. */ +#define SCALAR_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE || ARITHMETIC_TYPE_P (TYPE) \ + || TYPE_PTR_P (TYPE) || NULLPTR_TYPE_P (TYPE)) + +/* True if NODE is an implicit INDIRECT_REF from convert_from_reference. 
*/ +#define REFERENCE_REF_P(NODE) \ + (INDIRECT_REF_P (NODE) && TREE_TYPE (TREE_OPERAND (NODE, 0)) \ + && TYPE_REF_P (TREE_TYPE (TREE_OPERAND ((NODE), 0)))) + +// this is a helper to differentiate RECORD types between actual records and +// slices +#define SLICE_FLAG TREE_LANG_FLAG_0 +#define SLICE_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == RECORD_TYPE && TREE_LANG_FLAG_0 (TYPE)) + +/* Returns true if NODE is a pointer to member function type. */ +#define TYPE_PTRMEMFUNC_P(NODE) \ + (TREE_CODE (NODE) == RECORD_TYPE && TYPE_PTRMEMFUNC_FLAG (NODE)) + +#define TYPE_PTRMEMFUNC_FLAG(NODE) (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE))) + +#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) (TREE_TYPE (TYPE_FIELDS (NODE))) + +/* True if NODE is a compound-literal, i.e., a brace-enclosed + initializer cast to a particular type. This is mostly only set during + template parsing; once the initializer has been digested into an actual + value of the type, the expression is represented by a TARGET_EXPR. */ +#define COMPOUND_LITERAL_P(NODE) \ + (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) + +/* When appearing in an INDIRECT_REF, it means that the tree structure + underneath is actually a call to a constructor. This is needed + when the constructor must initialize local storage (which can + be automatically destroyed), rather than allowing it to allocate + space from the heap. + + When appearing in a SAVE_EXPR, it means that underneath + is a call to a constructor. + + When appearing in a CONSTRUCTOR, the expression is an unconverted + compound literal. + + When appearing in a FIELD_DECL, it means that this field + has been duly initialized in its constructor. */ +#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) + +/* Nonzero if T is a class type. Zero for template type parameters, + typename types, and so forth. 
*/ +#define CLASS_TYPE_P(T) \ + (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T)) + +/* [class.virtual] + + A class that declares or inherits a virtual function is called a + polymorphic class. */ +#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) + +/* Nonzero if this class has a virtual function table pointer. */ +#define TYPE_CONTAINS_VPTR_P(NODE) \ + (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) + +/* A vector of BINFOs for the direct and indirect virtual base classes + that this type uses in a post-order depth-first left-to-right + order. (In other words, these bases appear in the order that they + should be initialized.) */ +#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) + +/* A vector of BINFOs for the direct and indirect virtual base classes + that this type uses in a post-order depth-first left-to-right + order. (In other words, these bases appear in the order that they + should be initialized.) */ +#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) + +/* We used to have a variant type for lang_type. Keep the name of the + checking accessor for the sole survivor. */ +#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE)) + +/* Keep these checks in ascending code order. */ +#define RECORD_OR_UNION_CODE_P(T) ((T) == RECORD_TYPE || (T) == UNION_TYPE) +#define OVERLOAD_TYPE_P(T) (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE) + +/* Nonzero if this class is "empty" in the sense of the C++ ABI. */ +#define CLASSTYPE_EMPTY_P(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) + +/* True if DECL is declared 'constexpr'. */ +#define DECL_DECLARED_CONSTEXPR_P(DECL) \ + DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (DECL)) + +#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ + TREE_CHECK2 (NODE, VAR_DECL, FUNCTION_DECL) + +// Below macros are copied from gcc/c-family/c-common.h + +/* In a FIELD_DECL, nonzero if the decl was originally a bitfield. 
*/ +#define DECL_C_BIT_FIELD(NODE) (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) == 1) +#define SET_DECL_C_BIT_FIELD(NODE) \ + (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 1) +#define CLEAR_DECL_C_BIT_FIELD(NODE) \ + (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 0) + +/* True if the decl was an unnamed bitfield. */ +#define DECL_UNNAMED_BIT_FIELD(NODE) \ + (DECL_C_BIT_FIELD (NODE) && !DECL_NAME (NODE)) + +/* 1 iff NODE is function-local. */ +#define DECL_FUNCTION_SCOPE_P(NODE) \ + (DECL_CONTEXT (NODE) && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) + +/* Nonzero if this type is const-qualified, but not + volatile-qualified. Other qualifiers are ignored. This macro is + used to test whether or not it is OK to bind an rvalue to a + reference. */ +#define RS_TYPE_CONST_NON_VOLATILE_P(NODE) \ + ((rs_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \ + == TYPE_QUAL_CONST) + +/* [basic.fundamental] + + Types bool, char, wchar_t, and the signed and unsigned integer types + are collectively called integral types. + + Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration + types as well, which is incorrect in C++. Keep these checks in + ascending code order. */ +#define RS_INTEGRAL_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == INTEGER_TYPE) + +/* Returns true if TYPE is an integral or enumeration name. Keep + these checks in ascending code order. */ +#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE || RS_INTEGRAL_TYPE_P (TYPE)) + +/* Nonzero for a VAR_DECL that was initialized with a + constant-expression. 
*/ +#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ + (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) + +// Above macros are copied from gcc/c-family/c-common.h + +// forked from gcc/cp/cp-tree.h treee_pair_s + +struct GTY (()) tree_pair_s +{ + tree purpose; + tree value; +}; + +// forked from gcc/cp/cp-tree.h tree_pair_p + +typedef tree_pair_s *tree_pair_p; + +// forked from gcc/cp/cp-tree.h lang_type + +/* This structure provides additional information above and beyond + what is provide in the ordinary tree_type. In the past, we used it + for the types of class types, template parameters types, typename + types, and so forth. However, there can be many (tens to hundreds + of thousands) of template parameter types in a compilation, and + there's no need for this additional information in that case. + Therefore, we now use this data structure only for class types. + + In the past, it was thought that there would be relatively few + class types. However, in the presence of heavy use of templates, + many (i.e., thousands) of classes can easily be generated. + Therefore, we should endeavor to keep the size of this structure to + a minimum. */ +struct GTY (()) lang_type +{ + unsigned char align; + + unsigned has_type_conversion : 1; + unsigned has_copy_ctor : 1; + unsigned has_default_ctor : 1; + unsigned const_needs_init : 1; + unsigned ref_needs_init : 1; + unsigned has_const_copy_assign : 1; + unsigned use_template : 2; + + unsigned has_mutable : 1; + unsigned com_interface : 1; + unsigned non_pod_class : 1; + unsigned nearly_empty_p : 1; + unsigned user_align : 1; + unsigned has_copy_assign : 1; + unsigned has_new : 1; + unsigned has_array_new : 1; + + unsigned gets_delete : 2; + unsigned interface_only : 1; + unsigned interface_unknown : 1; + unsigned contains_empty_class_p : 1; + unsigned anon_aggr : 1; + unsigned non_zero_init : 1; + unsigned empty_p : 1; + /* 32 bits allocated. 
*/ + + unsigned vec_new_uses_cookie : 1; + unsigned declared_class : 1; + unsigned diamond_shaped : 1; + unsigned repeated_base : 1; + unsigned being_defined : 1; + unsigned debug_requested : 1; + unsigned fields_readonly : 1; + unsigned ptrmemfunc_flag : 1; + + unsigned lazy_default_ctor : 1; + unsigned lazy_copy_ctor : 1; + unsigned lazy_copy_assign : 1; + unsigned lazy_destructor : 1; + unsigned has_const_copy_ctor : 1; + unsigned has_complex_copy_ctor : 1; + unsigned has_complex_copy_assign : 1; + unsigned non_aggregate : 1; + + unsigned has_complex_dflt : 1; + unsigned has_list_ctor : 1; + unsigned non_std_layout : 1; + unsigned is_literal : 1; + unsigned lazy_move_ctor : 1; + unsigned lazy_move_assign : 1; + unsigned has_complex_move_ctor : 1; + unsigned has_complex_move_assign : 1; + + unsigned has_constexpr_ctor : 1; + unsigned unique_obj_representations : 1; + unsigned unique_obj_representations_set : 1; + bool erroneous : 1; + bool non_pod_aggregate : 1; + + /* When adding a flag here, consider whether or not it ought to + apply to a template instance if it applies to the template. If + so, make sure to copy it in instantiate_class_template! */ + + /* There are some bits left to fill out a 32-bit word. Keep track + of this by updating the size of this bitfield whenever you add or + remove a flag. */ + unsigned dummy : 3; + + tree primary_base; + vec *vcall_indices; + tree vtables; + tree typeinfo_var; + vec *vbases; + tree as_base; + vec *pure_virtuals; + tree friend_classes; + vec *GTY ((reorder ("resort_type_member_vec"))) members; + tree key_method; + tree decl_list; + tree befriending_classes; + /* In a RECORD_TYPE, information specific to Objective-C++, such + as a list of adopted protocols or a pointer to a corresponding + @interface. See objc/objc-act.h for details. */ + tree objc_info; + /* FIXME reuse another field? 
*/ + tree lambda_expr; +}; + +namespace Rust { + +// forked from gcc/cp/cp-tree.h tsubst_flags_t + +/* This type is used for parameters and variables which hold + combinations of the flags in enum tsubst_flags. */ +typedef int tsubst_flags_t; + +// forked from gcc/cp/cvt.cc convert_to_void +// +// When an expression is used in a void context, its value is discarded and +// no lvalue-rvalue and similar conversions happen [expr.static.cast/4, +// stmt.expr/1, expr.comma/1]. This permits dereferencing an incomplete type +// in a void context. The C++ standard does not define what an `access' to an +// object is, but there is reason to believe that it is the lvalue to rvalue +// conversion -- if it were not, `*&*p = 1' would violate [expr]/4 in that it +// accesses `*p' not to calculate the value to be stored. But, dcl.type.cv/8 +// indicates that volatile semantics should be the same between C and C++ +// where ever possible. C leaves it implementation defined as to what +// constitutes an access to a volatile. So, we interpret `*vp' as a read of +// the volatile object `vp' points to, unless that is an incomplete type. For +// volatile references we do not do this interpretation, because that would +// make it impossible to ignore the reference return value from functions. We +// issue warnings in the confusing cases. +// +// The IMPLICIT is ICV_CAST when the user is explicitly converting an +// expression to void via a cast. If an expression is being implicitly +// converted, IMPLICIT indicates the context of the implicit conversion. + +/* Possible cases of implicit or explicit bad conversions to void. 
*/ +enum impl_conv_void +{ + ICV_CAST, /* (explicit) conversion to void */ + ICV_SECOND_OF_COND, /* second operand of conditional expression */ + ICV_THIRD_OF_COND, /* third operand of conditional expression */ + ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ + ICV_LEFT_OF_COMMA, /* left operand of comma operator */ + ICV_STATEMENT, /* statement */ + ICV_THIRD_IN_FOR /* for increment expression */ +}; + +/* BUILT_IN_FRONTEND function codes. */ +enum rs_built_in_function +{ + RS_BUILT_IN_IS_CONSTANT_EVALUATED, + RS_BUILT_IN_INTEGER_PACK, + RS_BUILT_IN_IS_CORRESPONDING_MEMBER, + RS_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS, + RS_BUILT_IN_SOURCE_LOCATION, + RS_BUILT_IN_LAST +}; + +extern tree +convert_to_void (tree expr, impl_conv_void implicit); + +// The lvalue-to-rvalue conversion (7.1) is applied if and only if the +// expression is a glvalue of volatile-qualified type and it is one of the +// following: +// * ( expression ), where expression is one of these expressions, +// * id-expression (8.1.4), +// * subscripting (8.2.1), +// * class member access (8.2.5), +// * indirection (8.3.1), +// * pointer-to-member operation (8.5), +// * conditional expression (8.16) where both the second and the third +// operands are one of these expressions, or +// * comma expression (8.19) where the right operand is one of these +// expressions. +extern tree +mark_discarded_use (tree expr); + +// Mark EXP as read, not just set, for set but not used -Wunused warning +// purposes. +extern void +mark_exp_read (tree exp); + +// We've seen an actual use of EXPR. Possibly replace an outer variable +// reference inside with its constant value or a lambda capture. +extern tree +mark_use (tree expr, bool rvalue_p, bool read_p, location_t loc, + bool reject_builtin); + +// Called whenever the expression EXPR is used in an rvalue context. 
+// When REJECT_BUILTIN is true the expression is checked to make sure +// it doesn't make it possible to obtain the address of a GCC built-in +// function with no library fallback (or any of its bits, such as in +// a conversion to bool). +extern tree +mark_rvalue_use (tree e, location_t loc /* = UNKNOWN_LOCATION */, + bool reject_builtin /* = true */); + +// Called whenever an expression is used in an lvalue context. +extern tree +mark_lvalue_use (tree expr); + +// As above, but don't consider this use a read. +extern tree +mark_lvalue_use_nonread (tree expr); + +// We are using a reference VAL for its value. Bash that reference all the way +// down to its lowest form. +extern tree +convert_from_reference (tree val); + +// Subroutine of convert_to_void. Warn if we're discarding something with +// attribute [[nodiscard]]. +extern void +maybe_warn_nodiscard (tree expr, impl_conv_void implicit); + +extern location_t +expr_loc_or_loc (const_tree t, location_t or_loc); + +extern location_t +expr_loc_or_input_loc (const_tree t); + +// FN is the callee of a CALL_EXPR or AGGR_INIT_EXPR; return the FUNCTION_DECL +// if we can. +extern tree +get_fndecl_from_callee (tree fn); + +// FIXME some helpers from HIRCompileBase could probably be moved here over time + +// Return an expression for the address of BASE[INDEX], used in offset intrinsic +extern tree +pointer_offset_expression (tree base_tree, tree index_tree, location_t locus); + +/* A tree node, together with a location, so that we can track locations + (and ranges) during parsing. + + The location is redundant for node kinds that have locations, + but not all node kinds do (e.g. constants, and references to + params, locals, etc), so we stash a copy here. 
*/ + +extern location_t rs_expr_location (const_tree); + +extern int +is_empty_class (tree type); + +extern tree array_type_nelts_top (tree); + +extern bool +is_really_empty_class (tree, bool); + +extern bool builtin_valid_in_constant_expr_p (const_tree); + +extern bool maybe_constexpr_fn (tree); + +extern bool var_in_maybe_constexpr_fn (tree); + +extern int +rs_type_quals (const_tree type); + +extern bool decl_maybe_constant_var_p (tree); + +extern tree +rs_walk_subtrees (tree *, int *, walk_tree_fn, void *, hash_set *); +#define rs_walk_tree(tp, func, data, pset) \ + walk_tree_1 (tp, func, data, pset, rs_walk_subtrees) +#define rs_walk_tree_without_duplicates(tp, func, data) \ + walk_tree_without_duplicates_1 (tp, func, data, rs_walk_subtrees) + +// forked from gcc/cp/cp-tree.h cp_expr_loc_or_loc + +inline location_t +rs_expr_loc_or_loc (const_tree t, location_t or_loc) +{ + location_t loc = rs_expr_location (t); + if (loc == UNKNOWN_LOCATION) + loc = or_loc; + return loc; +} + +// forked from gcc/cp/cp-tree.h cp_expr_loc_or_input_loc + +inline location_t +rs_expr_loc_or_input_loc (const_tree t) +{ + return rs_expr_loc_or_loc (t, input_location); +} + +} // namespace Rust + +#endif // RUST_TREE diff --git a/gcc/rust/rust-backend.h b/gcc/rust/rust-backend.h new file mode 100644 index 00000000000..126283c1a54 --- /dev/null +++ b/gcc/rust/rust-backend.h @@ -0,0 +1,506 @@ +// Copyright (C) 2020-2022 Free Software Foundation, Inc. + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. + +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#ifndef RUST_BACKEND_H +#define RUST_BACKEND_H + +#include +#include +#include + +#include "rust-location.h" +#include "rust-linemap.h" +#include "rust-diagnostics.h" +#include "operator.h" +#include "tree.h" + +// Pointers to these types are created by the backend, passed to the +// frontend, and passed back to the backend. The types must be +// defined by the backend using these names. + +// The backend representation of a variable. +class Bvariable; + +// The backend interface. This is a pure abstract class that a +// specific backend will implement. + +class Backend +{ +public: + virtual ~Backend () {} + + // Name/type/location. Used for function parameters, struct fields, + // interface methods. + struct typed_identifier + { + std::string name; + tree type; + Location location; + + typed_identifier () + : name (), type (NULL_TREE), location (Linemap::unknown_location ()) + {} + + typed_identifier (const std::string &a_name, tree a_type, + Location a_location) + : name (a_name), type (a_type), location (a_location) + {} + }; + + // debug + virtual void debug (tree) = 0; + virtual void debug (Bvariable *) = 0; + + virtual tree get_identifier_node (const std::string &str) = 0; + + // Types. + + // get unit-type + virtual tree unit_type () = 0; + + // Get the unnamed boolean type. + virtual tree bool_type () = 0; + + // Get the char type + virtual tree char_type () = 0; + + // Get the wchar type + virtual tree wchar_type () = 0; + + // Get the Host pointer size in bits + virtual int get_pointer_size () = 0; + + // Get the raw str type const char* + virtual tree raw_str_type () = 0; + + // Get an unnamed integer type with the given signedness and number + // of bits. + virtual tree integer_type (bool is_unsigned, int bits) = 0; + + // Get an unnamed floating point type with the given number of bits + // (32 or 64). 
+ virtual tree float_type (int bits) = 0; + + // Get an unnamed complex type with the given number of bits (64 or 128). + virtual tree complex_type (int bits) = 0; + + // Get a pointer type. + virtual tree pointer_type (tree to_type) = 0; + + // Get a reference type. + virtual tree reference_type (tree to_type) = 0; + + // make type immutable + virtual tree immutable_type (tree base) = 0; + + // Get a function type. The receiver, parameter, and results are + // generated from the types in the Function_type. The Function_type + // is provided so that the names are available. This should return + // not the type of a Go function (which is a pointer to a struct) + // but the type of a C function pointer (which will be used as the + // type of the first field of the struct). If there is more than + // one result, RESULT_STRUCT is a struct type to hold the results, + // and RESULTS may be ignored; if there are zero or one results, + // RESULT_STRUCT is NULL. + virtual tree function_type (const typed_identifier &receiver, + const std::vector &parameters, + const std::vector &results, + tree result_struct, Location location) + = 0; + + virtual tree + function_type_varadic (const typed_identifier &receiver, + const std::vector &parameters, + const std::vector &results, + tree result_struct, Location location) + = 0; + + virtual tree function_ptr_type (tree result, + const std::vector &parameters, + Location location) + = 0; + + // Get a struct type. + virtual tree struct_type (const std::vector &fields) = 0; + + // Get a union type. + virtual tree union_type (const std::vector &fields) = 0; + + // Get an array type. + virtual tree array_type (tree element_type, tree length) = 0; + + // Return a named version of a type. The location is the location + // of the type definition. This will not be called for a type + // created via placeholder_pointer_type, placeholder_struct_type, or + // placeholder_array_type.. 
(It may be called for a pointer, + // struct, or array type in a case like "type P *byte; type Q P".) + virtual tree named_type (const std::string &name, tree, Location) = 0; + + // Return the size of a type. + virtual int64_t type_size (tree) = 0; + + // Return the alignment of a type. + virtual int64_t type_alignment (tree) = 0; + + // Return the alignment of a struct field of this type. This is + // normally the same as type_alignment, but not always. + virtual int64_t type_field_alignment (tree) = 0; + + // Return the offset of field INDEX in a struct type. INDEX is the + // entry in the FIELDS std::vector parameter of struct_type or + // set_placeholder_struct_type. + virtual int64_t type_field_offset (tree, size_t index) = 0; + + // Expressions. + + // Return an expression for a zero value of the given type. This is + // used for cases such as local variable initialization and + // converting nil to other types. + virtual tree zero_expression (tree) = 0; + + virtual tree unit_expression () = 0; + + // Create a reference to a variable. + virtual tree var_expression (Bvariable *var, Location) = 0; + + // Return an expression for the multi-precision integer VAL in BTYPE. + virtual tree integer_constant_expression (tree btype, mpz_t val) = 0; + + // Return an expression for the floating point value VAL in BTYPE. + virtual tree float_constant_expression (tree btype, mpfr_t val) = 0; + + // Return an expression for the complex value VAL in BTYPE. + virtual tree complex_constant_expression (tree btype, mpc_t val) = 0; + + // Return an expression for the string value VAL. + virtual tree string_constant_expression (const std::string &val) = 0; + + // Get a char literal + virtual tree char_constant_expression (char c) = 0; + + // Get a char literal + virtual tree wchar_constant_expression (wchar_t c) = 0; + + // Return an expression for the boolean value VAL. 
+ virtual tree boolean_constant_expression (bool val) = 0; + + // Return an expression for the real part of BCOMPLEX. + virtual tree real_part_expression (tree bcomplex, Location) = 0; + + // Return an expression for the imaginary part of BCOMPLEX. + virtual tree imag_part_expression (tree bcomplex, Location) = 0; + + // Return an expression for the complex number (BREAL, BIMAG). + virtual tree complex_expression (tree breal, tree bimag, Location) = 0; + + // Return an expression that converts EXPR to TYPE. + virtual tree convert_expression (tree type, tree expr, Location) = 0; + + // Return an expression for the field at INDEX in BSTRUCT. + virtual tree struct_field_expression (tree bstruct, size_t index, Location) + = 0; + + // Create an expression that executes BSTAT before BEXPR. + virtual tree compound_expression (tree bstat, tree bexpr, Location) = 0; + + // Return an expression that executes THEN_EXPR if CONDITION is true, or + // ELSE_EXPR otherwise and returns the result as type BTYPE, within the + // specified function FUNCTION. ELSE_EXPR may be NULL. BTYPE may be NULL. + virtual tree conditional_expression (tree function, tree btype, + tree condition, tree then_expr, + tree else_expr, Location) + = 0; + + // Return an expression for the negation operation OP EXPR. + // Supported values of OP are enumerated in NegationOperator. + virtual tree negation_expression (NegationOperator op, tree expr, Location) + = 0; + + // Return an expression for the operation LEFT OP RIGHT. + // Supported values of OP are enumerated in ArithmeticOrLogicalOperator. + virtual tree arithmetic_or_logical_expression (ArithmeticOrLogicalOperator op, + tree left, tree right, + Location) + = 0; + + // Return an expression for the operation LEFT OP RIGHT. + // Supported values of OP are enumerated in ComparisonOperator. + virtual tree comparison_expression (ComparisonOperator op, tree left, + tree right, Location) + = 0; + + // Return an expression for the operation LEFT OP RIGHT. 
+ // Supported values of OP are enumerated in LazyBooleanOperator. + virtual tree lazy_boolean_expression (LazyBooleanOperator op, tree left, + tree right, Location) + = 0; + + // Return an expression that constructs BTYPE with VALS. BTYPE must be the + // backend representation a of struct. VALS must be in the same order as the + // corresponding fields in BTYPE. + virtual tree constructor_expression (tree btype, bool is_variant, + const std::vector &vals, int, + Location) + = 0; + + // Return an expression that constructs an array of BTYPE with INDEXES and + // VALS. INDEXES and VALS must have the same amount of elements. Each index + // in INDEXES must be in the same order as the corresponding value in VALS. + virtual tree + array_constructor_expression (tree btype, + const std::vector &indexes, + const std::vector &vals, Location) + = 0; + + virtual tree array_initializer (tree, tree, tree, tree, tree, tree *, + Location) + = 0; + + // Return an expression for ARRAY[INDEX] as an l-value. ARRAY is a valid + // fixed-length array, not a slice. + virtual tree array_index_expression (tree array, tree index, Location) = 0; + + // Create an expression for a call to FN with ARGS, taking place within + // caller CALLER. + virtual tree call_expression (tree fn, const std::vector &args, + tree static_chain, Location) + = 0; + + // Statements. + + // Create a variable initialization statement in the specified + // function. This initializes a local variable at the point in the + // program flow where it is declared. + virtual tree init_statement (tree, Bvariable *var, tree init) = 0; + + // Create an assignment statement within the specified function. + virtual tree assignment_statement (tree lhs, tree rhs, Location) = 0; + + // Create a return statement, passing the representation of the + // function and the list of values to return. + virtual tree return_statement (tree, const std::vector &, Location) = 0; + + // Create an if statement within a function. 
ELSE_BLOCK may be NULL. + virtual tree if_statement (tree, tree condition, tree then_block, + tree else_block, Location) + = 0; + + // infinite loop expressions + virtual tree loop_expression (tree body, Location) = 0; + + // exit expressions + virtual tree exit_expression (tree condition, Location) = 0; + + // Create a single statement from two statements. + virtual tree compound_statement (tree, tree) = 0; + + // Create a single statement from a list of statements. + virtual tree statement_list (const std::vector &) = 0; + + // Create a statement that attempts to execute BSTAT and calls EXCEPT_STMT if + // an exception occurs. EXCEPT_STMT may be NULL. FINALLY_STMT may be NULL and + // if not NULL, it will always be executed. This is used for handling defers + // in Go functions. In C++, the resulting code is of this form: + // try { BSTAT; } catch { EXCEPT_STMT; } finally { FINALLY_STMT; } + virtual tree exception_handler_statement (tree bstat, tree except_stmt, + tree finally_stmt, Location) + = 0; + + // Blocks. + + // Create a block. The frontend will call this function when it + // starts converting a block within a function. FUNCTION is the + // current function. ENCLOSING is the enclosing block; it will be + // NULL for the top-level block in a function. VARS is the list of + // local variables defined within this block; each entry will be + // created by the local_variable function. START_LOCATION is the + // location of the start of the block, more or less the location of + // the initial curly brace. END_LOCATION is the location of the end + // of the block, more or less the location of the final curly brace. + // The statements will be added after the block is created. + virtual tree block (tree function, tree enclosing, + const std::vector &vars, + Location start_location, Location end_location) + = 0; + + // Add the statements to a block. The block is created first. Then + // the statements are created. Then the statements are added to the + // block. 
This will be called exactly once per block. The vector may + // be empty if there are no statements. + virtual void block_add_statements (tree, const std::vector &) = 0; + + // Variables. + + // Create an error variable. This is used for cases which should + // not occur in a correct program, in order to keep the compilation + // going without crashing. + virtual Bvariable *error_variable () = 0; + + // Create a global variable. NAME is the package-qualified name of + // the variable. ASM_NAME is the encoded identifier for the + // variable, incorporating the package, and made safe for the + // assembler. BTYPE is the type of the variable. IS_EXTERNAL is + // true if the variable is defined in some other package. IS_HIDDEN + // is true if the variable is not exported (name begins with a lower + // case letter). IN_UNIQUE_SECTION is true if the variable should + // be put into a unique section if possible; this is intended to + // permit the linker to garbage collect the variable if it is not + // referenced. LOCATION is where the variable was defined. + virtual Bvariable *global_variable (const std::string &name, + const std::string &asm_name, tree btype, + bool is_external, bool is_hidden, + bool in_unique_section, Location location) + = 0; + + // A global variable will 1) be initialized to zero, or 2) be + // initialized to a constant value, or 3) be initialized in the init + // function. In case 2, the frontend will call + // global_variable_set_init to set the initial value. If this is + // not called, the backend should initialize a global variable to 0. + // The init function may then assign a value to it. + virtual void global_variable_set_init (Bvariable *, tree) = 0; + + // Create a local variable. The frontend will create the local + // variables first, and then create the block which contains them. + // FUNCTION is the function in which the variable is defined. NAME + // is the name of the variable. TYPE is the type. 
DECL_VAR, if not + // null, gives the location at which the value of this variable may + // be found, typically used to create an inner-scope reference to an + // outer-scope variable, to extend the lifetime of the variable beyond + // the inner scope. IS_ADDRESS_TAKEN is true if the address of this + // variable is taken (this implies that the address does not escape + // the function, as otherwise the variable would be on the heap). + // LOCATION is where the variable is defined. For each local variable + // the frontend will call init_statement to set the initial value. + virtual Bvariable *local_variable (tree function, const std::string &name, + tree type, Bvariable *decl_var, + Location location) + = 0; + + // Create a function parameter. This is an incoming parameter, not + // a result parameter (result parameters are treated as local + // variables). The arguments are as for local_variable. + virtual Bvariable *parameter_variable (tree function, const std::string &name, + tree type, Location location) + = 0; + + // Create a static chain parameter. This is the closure parameter. + virtual Bvariable *static_chain_variable (tree function, + const std::string &name, tree type, + Location location) + = 0; + + // Create a temporary variable. A temporary variable has no name, + // just a type. We pass in FUNCTION and BLOCK in case they are + // needed. If INIT is not NULL, the variable should be initialized + // to that value. Otherwise the initial value is irrelevant--the + // backend does not have to explicitly initialize it to zero. + // ADDRESS_IS_TAKEN is true if the program needs to take the + // address of this temporary variable. LOCATION is the location of + // the statement or expression which requires creating the temporary + // variable, and may not be very useful. This function should + // return a variable which can be referenced later and should set + // *PSTATEMENT to a statement which initializes the variable. 
+ virtual Bvariable *temporary_variable (tree, tree, tree, tree init, + bool address_is_taken, + Location location, tree *pstatement) + = 0; + + // Labels. + + // Create a new label. NAME will be empty if this is a label + // created by the frontend for a loop construct. The location is + // where the label is defined. + virtual tree label (tree, const std::string &name, Location) = 0; + + // Create a statement which defines a label. This statement will be + // put into the codestream at the point where the label should be + // defined. + virtual tree label_definition_statement (tree) = 0; + + // Create a goto statement to a label. + virtual tree goto_statement (tree, Location) = 0; + + // Create an expression for the address of a label. This is used to + // get the return address of a deferred function which may call + // recover. + virtual tree label_address (tree, Location) = 0; + + // Functions. + + // Bit flags to pass to the function method. + + // Set if this is a function declaration rather than a definition; + // the definition will be in another compilation unit. + static const unsigned int function_is_declaration = 1 << 0; + + // Set if the function should never be inlined because they call + // recover and must be visible for correct panic recovery. + static const unsigned int function_is_uninlinable = 1 << 1; + + // Set if the function does not return. This is set for the + // implementation of panic. + static const unsigned int function_does_not_return = 1 << 2; + + // Set if the function should be put in a unique section if + // possible. This is used for field tracking. + static const unsigned int function_in_unique_section = 1 << 3; + + // Declare or define a function of FNTYPE. + // NAME is the Go name of the function. ASM_NAME, if not the empty + // string, is the name that should be used in the symbol table; this + // will be non-empty if a magic extern comment is used. FLAGS is + // bit flags described above. 
+ virtual tree function (tree fntype, const std::string &name, + const std::string &asm_name, unsigned int flags, + Location) + = 0; + + // Create a statement that runs all deferred calls for FUNCTION. This should + // be a statement that looks like this in C++: + // finish: + // try { DEFER_RETURN; } catch { CHECK_DEFER; goto finish; } + virtual tree function_defer_statement (tree function, tree undefer, + tree check_defer, Location) + = 0; + + // Record PARAM_VARS as the variables to use for the parameters of FUNCTION. + // This will only be called for a function definition. Returns true on + // success, false on failure. + virtual bool + function_set_parameters (tree function, + const std::vector &param_vars) + = 0; + + // Utility. + + // Write the definitions for all TYPE_DECLS, CONSTANT_DECLS, + // FUNCTION_DECLS, and VARIABLE_DECLS declared globally. + virtual void + write_global_definitions (const std::vector &type_decls, + const std::vector &constant_decls, + const std::vector &function_decls, + const std::vector &variable_decls) + = 0; + + // Write SIZE bytes of export data from BYTES to the proper + // section in the output object file. + virtual void write_export_data (const char *bytes, unsigned int size) = 0; +}; + +#endif // RUST_BACKEND_H diff --git a/gcc/rust/rust-gcc.cc b/gcc/rust/rust-gcc.cc new file mode 100644 index 00000000000..ffb67f3fe78 --- /dev/null +++ b/gcc/rust/rust-gcc.cc @@ -0,0 +1,2717 @@ +// rust-gcc.cc -- Rust frontend to gcc IR. +// Copyright (C) 2011-2022 Free Software Foundation, Inc. +// Contributed by Ian Lance Taylor, Google. +// forked from gccgo + +// This file is part of GCC. + +// GCC is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3, or (at your option) any later +// version. 
+ +// GCC is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. + +// You should have received a copy of the GNU General Public License +// along with GCC; see the file COPYING3. If not see +// . + +#include "rust-system.h" + +// This has to be included outside of extern "C", so we have to +// include it here before tree.h includes it later. +#include + +#include "tree.h" +#include "opts.h" +#include "fold-const.h" +#include "stringpool.h" +#include "stor-layout.h" +#include "varasm.h" +#include "tree-iterator.h" +#include "tm.h" +#include "function.h" +#include "cgraph.h" +#include "convert.h" +#include "gimple-expr.h" +#include "gimplify.h" +#include "langhooks.h" +#include "toplev.h" +#include "output.h" +#include "realmpfr.h" +#include "builtins.h" +#include "print-tree.h" +#include "attribs.h" + +#include "rust-location.h" +#include "rust-linemap.h" +#include "rust-backend.h" +#include "rust-object-export.h" + +#include "backend/rust-tree.h" + +// TODO: this will have to be significantly modified to work with Rust + +// Bvariable is a bit more complicated, because of zero-sized types. +// The GNU linker does not permit dynamic variables with zero size. +// When we see such a variable, we generate a version of the type with +// non-zero size. However, when referring to the global variable, we +// want an expression of zero size; otherwise, if, say, the global +// variable is passed to a function, we will be passing a +// non-zero-sized value to a zero-sized value, which can lead to a +// miscompilation. + +class Bvariable +{ +public: + Bvariable (tree t) : t_ (t), orig_type_ (NULL) {} + + Bvariable (tree t, tree orig_type) : t_ (t), orig_type_ (orig_type) {} + + // Get the tree for use as an expression. 
+ tree get_tree (Location) const; + + // Get the actual decl; + tree get_decl () const { return this->t_; } + +private: + tree t_; + tree orig_type_; +}; + +// Get the tree of a variable for use as an expression. If this is a +// zero-sized global, create an expression that refers to the decl but +// has zero size. +tree +Bvariable::get_tree (Location location) const +{ + if (this->t_ == error_mark_node) + return error_mark_node; + + TREE_USED (this->t_) = 1; + if (this->orig_type_ == NULL || TREE_TYPE (this->t_) == this->orig_type_) + { + return this->t_; + } + + // Return *(orig_type*)&decl. */ + tree t = build_fold_addr_expr_loc (location.gcc_location (), this->t_); + t = fold_build1_loc (location.gcc_location (), NOP_EXPR, + build_pointer_type (this->orig_type_), t); + return build_fold_indirect_ref_loc (location.gcc_location (), t); +} + +// This file implements the interface between the Rust frontend proper +// and the gcc IR. This implements specific instantiations of +// abstract classes defined by the Rust frontend proper. The Rust +// frontend proper class methods of these classes to generate the +// backend representation. + +class Gcc_backend : public Backend +{ +public: + Gcc_backend (); + + void debug (tree t) { debug_tree (t); }; + void debug (Bvariable *t) { debug_tree (t->get_decl ()); }; + + tree get_identifier_node (const std::string &str) + { + return get_identifier_with_length (str.data (), str.length ()); + } + + // Types. 
+ + tree unit_type () + { + static tree unit_type; + if (unit_type == nullptr) + { + auto unit_type_node = struct_type ({}); + unit_type = named_type ("()", unit_type_node, + ::Linemap::predeclared_location ()); + } + + return unit_type; + } + + tree bool_type () { return boolean_type_node; } + + tree char_type () { return char_type_node; } + + tree wchar_type () + { + tree wchar = make_unsigned_type (32); + TYPE_STRING_FLAG (wchar) = 1; + return wchar; + } + + int get_pointer_size (); + + tree raw_str_type (); + + tree integer_type (bool, int); + + tree float_type (int); + + tree complex_type (int); + + tree pointer_type (tree); + + tree reference_type (tree); + + tree immutable_type (tree); + + tree function_type (const typed_identifier &, + const std::vector &, + const std::vector &, tree, + const Location); + + tree function_type_varadic (const typed_identifier &, + const std::vector &, + const std::vector &, tree, + const Location); + + tree function_ptr_type (tree, const std::vector &, Location); + + tree struct_type (const std::vector &); + + tree union_type (const std::vector &); + + tree array_type (tree, tree); + + tree named_type (const std::string &, tree, Location); + + int64_t type_size (tree); + + int64_t type_alignment (tree); + + int64_t type_field_alignment (tree); + + int64_t type_field_offset (tree, size_t index); + + // Expressions. 
+ + tree zero_expression (tree); + + tree unit_expression () { return integer_zero_node; } + + tree var_expression (Bvariable *var, Location); + + tree integer_constant_expression (tree type, mpz_t val); + + tree float_constant_expression (tree type, mpfr_t val); + + tree complex_constant_expression (tree type, mpc_t val); + + tree string_constant_expression (const std::string &val); + + tree wchar_constant_expression (wchar_t c); + + tree char_constant_expression (char c); + + tree boolean_constant_expression (bool val); + + tree real_part_expression (tree bcomplex, Location); + + tree imag_part_expression (tree bcomplex, Location); + + tree complex_expression (tree breal, tree bimag, Location); + + tree convert_expression (tree type, tree expr, Location); + + tree struct_field_expression (tree, size_t, Location); + + tree compound_expression (tree, tree, Location); + + tree conditional_expression (tree, tree, tree, tree, tree, Location); + + tree negation_expression (NegationOperator op, tree expr, Location); + + tree arithmetic_or_logical_expression (ArithmeticOrLogicalOperator op, + tree left, tree right, Location); + + tree comparison_expression (ComparisonOperator op, tree left, tree right, + Location); + + tree lazy_boolean_expression (LazyBooleanOperator op, tree left, tree right, + Location); + + tree constructor_expression (tree, bool, const std::vector &, int, + Location); + + tree array_constructor_expression (tree, const std::vector &, + const std::vector &, Location); + + tree array_initializer (tree, tree, tree, tree, tree, tree *, Location); + + tree array_index_expression (tree array, tree index, Location); + + tree call_expression (tree fn, const std::vector &args, + tree static_chain, Location); + + // Statements. 
+ + tree init_statement (tree, Bvariable *var, tree init); + + tree assignment_statement (tree lhs, tree rhs, Location); + + tree return_statement (tree, const std::vector &, Location); + + tree if_statement (tree, tree condition, tree then_block, tree else_block, + Location); + + tree compound_statement (tree, tree); + + tree statement_list (const std::vector &); + + tree exception_handler_statement (tree bstat, tree except_stmt, + tree finally_stmt, Location); + + tree loop_expression (tree body, Location); + + tree exit_expression (tree condition, Location); + + // Blocks. + + tree block (tree, tree, const std::vector &, Location, Location); + + void block_add_statements (tree, const std::vector &); + + // Variables. + + Bvariable *error_variable () { return new Bvariable (error_mark_node); } + + Bvariable *global_variable (const std::string &var_name, + const std::string &asm_name, tree type, + bool is_external, bool is_hidden, + bool in_unique_section, Location location); + + void global_variable_set_init (Bvariable *, tree); + + Bvariable *local_variable (tree, const std::string &, tree, Bvariable *, + Location); + + Bvariable *parameter_variable (tree, const std::string &, tree, Location); + + Bvariable *static_chain_variable (tree, const std::string &, tree, Location); + + Bvariable *temporary_variable (tree, tree, tree, tree, bool, Location, + tree *); + + // Labels. + + tree label (tree, const std::string &name, Location); + + tree label_definition_statement (tree); + + tree goto_statement (tree, Location); + + tree label_address (tree, Location); + + // Functions. 
+ + tree function (tree fntype, const std::string &name, + const std::string &asm_name, unsigned int flags, Location); + + tree function_defer_statement (tree function, tree undefer, tree defer, + Location); + + bool function_set_parameters (tree function, + const std::vector &); + + void write_global_definitions (const std::vector &, + const std::vector &, + const std::vector &, + const std::vector &); + + void write_export_data (const char *bytes, unsigned int size); + +private: + tree fill_in_fields (tree, const std::vector &); + + tree fill_in_array (tree, tree, tree); + + tree non_zero_size_type (tree); + + tree convert_tree (tree, tree, Location); +}; + +// A helper function to create a GCC identifier from a C++ string. + +static inline tree +get_identifier_from_string (const std::string &str) +{ + return get_identifier_with_length (str.data (), str.length ()); +} + +// Define the built-in functions that are exposed to GCCRust. + +Gcc_backend::Gcc_backend () +{ + /* We need to define the fetch_and_add functions, since we use them + for ++ and --. 
*/ + // tree t = this->integer_type (true, BITS_PER_UNIT)->get_tree (); + // tree p = build_pointer_type (build_qualified_type (t, TYPE_QUAL_VOLATILE)); + // this->define_builtin (BUILT_IN_SYNC_ADD_AND_FETCH_1, + // "__sync_fetch_and_add_1", + // NULL, build_function_type_list (t, p, t, NULL_TREE), 0); + + // t = this->integer_type (true, BITS_PER_UNIT * 2)->get_tree (); + // p = build_pointer_type (build_qualified_type (t, TYPE_QUAL_VOLATILE)); + // this->define_builtin (BUILT_IN_SYNC_ADD_AND_FETCH_2, + // "__sync_fetch_and_add_2", + // NULL, build_function_type_list (t, p, t, NULL_TREE), 0); + + // t = this->integer_type (true, BITS_PER_UNIT * 4)->get_tree (); + // p = build_pointer_type (build_qualified_type (t, TYPE_QUAL_VOLATILE)); + // this->define_builtin (BUILT_IN_SYNC_ADD_AND_FETCH_4, + // "__sync_fetch_and_add_4", + // NULL, build_function_type_list (t, p, t, NULL_TREE), 0); + + // t = this->integer_type (true, BITS_PER_UNIT * 8)->get_tree (); + // p = build_pointer_type (build_qualified_type (t, TYPE_QUAL_VOLATILE)); + // this->define_builtin (BUILT_IN_SYNC_ADD_AND_FETCH_8, + // "__sync_fetch_and_add_8", + // NULL, build_function_type_list (t, p, t, NULL_TREE), 0); + + // // We use __builtin_expect for magic import functions. + // this->define_builtin (BUILT_IN_EXPECT, "__builtin_expect", NULL, + // build_function_type_list (long_integer_type_node, + // long_integer_type_node, + // long_integer_type_node, + // NULL_TREE), + // builtin_const); + + // // We use __builtin_memcmp for struct comparisons. + // this->define_builtin (BUILT_IN_MEMCMP, "__builtin_memcmp", "memcmp", + // build_function_type_list (integer_type_node, + // const_ptr_type_node, + // const_ptr_type_node, + // size_type_node, NULL_TREE), + // 0); + + // // We use __builtin_memmove for copying data. 
+ // this->define_builtin (BUILT_IN_MEMMOVE, "__builtin_memmove", "memmove", + // build_function_type_list (void_type_node, ptr_type_node, + // const_ptr_type_node, + // size_type_node, NULL_TREE), + // 0); + + // // We use __builtin_memset for zeroing data. + // this->define_builtin (BUILT_IN_MEMSET, "__builtin_memset", "memset", + // build_function_type_list (void_type_node, ptr_type_node, + // integer_type_node, + // size_type_node, NULL_TREE), + // 0); + + // // Used by runtime/internal/sys and math/bits. + // this->define_builtin (BUILT_IN_CTZ, "__builtin_ctz", "ctz", + // build_function_type_list (integer_type_node, + // unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_CTZLL, "__builtin_ctzll", "ctzll", + // build_function_type_list (integer_type_node, + // long_long_unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_CLZ, "__builtin_clz", "clz", + // build_function_type_list (integer_type_node, + // unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_CLZLL, "__builtin_clzll", "clzll", + // build_function_type_list (integer_type_node, + // long_long_unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_POPCOUNT, "__builtin_popcount", "popcount", + // build_function_type_list (integer_type_node, + // unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_POPCOUNTLL, "__builtin_popcountll", + // "popcountll", + // build_function_type_list (integer_type_node, + // long_long_unsigned_type_node, + // NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_BSWAP16, "__builtin_bswap16", "bswap16", + // build_function_type_list (uint16_type_node, + // uint16_type_node, NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_BSWAP32, "__builtin_bswap32", "bswap32", + // build_function_type_list (uint32_type_node, + // uint32_type_node, 
NULL_TREE), + // builtin_const); + // this->define_builtin (BUILT_IN_BSWAP64, "__builtin_bswap64", "bswap64", + // build_function_type_list (uint64_type_node, + // uint64_type_node, NULL_TREE), + // builtin_const); + + // We provide some functions for the math library. + + // We use __builtin_return_address in the thunk we build for + // functions which call recover, and for runtime.getcallerpc. + // t = build_function_type_list (ptr_type_node, unsigned_type_node, + // NULL_TREE); this->define_builtin (BUILT_IN_RETURN_ADDRESS, + // "__builtin_return_address", + // NULL, t, 0); + + // The runtime calls __builtin_dwarf_cfa for runtime.getcallersp. + // t = build_function_type_list (ptr_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_DWARF_CFA, "__builtin_dwarf_cfa", NULL, t, + // 0); + + // The runtime calls __builtin_extract_return_addr when recording + // the address to which a function returns. + // this->define_builtin ( + // BUILT_IN_EXTRACT_RETURN_ADDR, "__builtin_extract_return_addr", NULL, + // build_function_type_list (ptr_type_node, ptr_type_node, NULL_TREE), 0); + + // The compiler uses __builtin_trap for some exception handling + // cases. + // this->define_builtin (BUILT_IN_TRAP, "__builtin_trap", NULL, + // build_function_type (void_type_node, void_list_node), + // builtin_noreturn); + + // The runtime uses __builtin_prefetch. + // this->define_builtin (BUILT_IN_PREFETCH, "__builtin_prefetch", NULL, + // build_varargs_function_type_list (void_type_node, + // const_ptr_type_node, + // NULL_TREE), + // builtin_novops); + + // The compiler uses __builtin_unreachable for cases that cannot + // occur. + // this->define_builtin (BUILT_IN_UNREACHABLE, "__builtin_unreachable", NULL, + // build_function_type (void_type_node, void_list_node), + // builtin_const | builtin_noreturn); + + // We provide some atomic functions. 
+ // t = build_function_type_list (uint32_type_node, ptr_type_node, + // integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_LOAD_4, "__atomic_load_4", NULL, t, + // 0); + + // t = build_function_type_list (uint64_type_node, ptr_type_node, + // integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_LOAD_8, "__atomic_load_8", NULL, t, + // 0); + + // t = build_function_type_list (void_type_node, ptr_type_node, + // uint32_type_node, + // integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_STORE_4, "__atomic_store_4", NULL, t, + // 0); + + // t = build_function_type_list (void_type_node, ptr_type_node, + // uint64_type_node, + // integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_STORE_8, "__atomic_store_8", NULL, t, + // 0); + + // t = build_function_type_list (uint32_type_node, ptr_type_node, + // uint32_type_node, integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_EXCHANGE_4, "__atomic_exchange_4", + // NULL, + // t, 0); + + // t = build_function_type_list (uint64_type_node, ptr_type_node, + // uint64_type_node, integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_EXCHANGE_8, "__atomic_exchange_8", + // NULL, + // t, 0); + + // t = build_function_type_list (boolean_type_node, ptr_type_node, + // ptr_type_node, + // uint32_type_node, boolean_type_node, + // integer_type_node, integer_type_node, + // NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4, + // "__atomic_compare_exchange_4", NULL, t, 0); + + // t = build_function_type_list (boolean_type_node, ptr_type_node, + // ptr_type_node, + // uint64_type_node, boolean_type_node, + // integer_type_node, integer_type_node, + // NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8, + // "__atomic_compare_exchange_8", NULL, t, 0); + + // t = build_function_type_list (uint32_type_node, ptr_type_node, + // uint32_type_node, integer_type_node, 
NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_ADD_FETCH_4, "__atomic_add_fetch_4", + // NULL, t, 0); + + // t = build_function_type_list (uint64_type_node, ptr_type_node, + // uint64_type_node, integer_type_node, NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_ADD_FETCH_8, "__atomic_add_fetch_8", + // NULL, t, 0); + + // t = build_function_type_list (unsigned_char_type_node, ptr_type_node, + // unsigned_char_type_node, integer_type_node, + // NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_AND_FETCH_1, "__atomic_and_fetch_1", + // NULL, t, 0); + // this->define_builtin (BUILT_IN_ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1", + // NULL, t, 0); + + // t = build_function_type_list (unsigned_char_type_node, ptr_type_node, + // unsigned_char_type_node, integer_type_node, + // NULL_TREE); + // this->define_builtin (BUILT_IN_ATOMIC_OR_FETCH_1, "__atomic_or_fetch_1", + // NULL, + // t, 0); + // this->define_builtin (BUILT_IN_ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1", + // NULL, + // t, 0); +} + +// Get an unnamed integer type. 
+ +int +Gcc_backend::get_pointer_size () +{ + return POINTER_SIZE; +} + +tree +Gcc_backend::raw_str_type () +{ + tree char_ptr = build_pointer_type (char_type_node); + tree const_char_type = build_qualified_type (char_ptr, TYPE_QUAL_CONST); + return const_char_type; +} + +tree +Gcc_backend::integer_type (bool is_unsigned, int bits) +{ + tree type; + if (is_unsigned) + { + if (bits == INT_TYPE_SIZE) + type = unsigned_type_node; + else if (bits == SHORT_TYPE_SIZE) + type = short_unsigned_type_node; + else if (bits == LONG_TYPE_SIZE) + type = long_unsigned_type_node; + else if (bits == LONG_LONG_TYPE_SIZE) + type = long_long_unsigned_type_node; + else + type = make_unsigned_type (bits); + } + else + { + if (bits == INT_TYPE_SIZE) + type = integer_type_node; + else if (bits == SHORT_TYPE_SIZE) + type = short_integer_type_node; + else if (bits == LONG_TYPE_SIZE) + type = long_integer_type_node; + else if (bits == LONG_LONG_TYPE_SIZE) + type = long_long_integer_type_node; + else + type = make_signed_type (bits); + } + return type; +} + +// Get an unnamed float type. + +tree +Gcc_backend::float_type (int bits) +{ + tree type; + if (bits == FLOAT_TYPE_SIZE) + type = float_type_node; + else if (bits == DOUBLE_TYPE_SIZE) + type = double_type_node; + else if (bits == LONG_DOUBLE_TYPE_SIZE) + type = long_double_type_node; + else + { + type = make_node (REAL_TYPE); + TYPE_PRECISION (type) = bits; + layout_type (type); + } + return type; +} + +// Get an unnamed complex type. + +tree +Gcc_backend::complex_type (int bits) +{ + tree type; + if (bits == FLOAT_TYPE_SIZE * 2) + type = complex_float_type_node; + else if (bits == DOUBLE_TYPE_SIZE * 2) + type = complex_double_type_node; + else if (bits == LONG_DOUBLE_TYPE_SIZE * 2) + type = complex_long_double_type_node; + else + { + type = make_node (REAL_TYPE); + TYPE_PRECISION (type) = bits / 2; + layout_type (type); + type = build_complex_type (type); + } + return type; +} + +// Get a pointer type. 
+ +tree +Gcc_backend::pointer_type (tree to_type) +{ + if (to_type == error_mark_node) + return error_mark_node; + tree type = build_pointer_type (to_type); + return type; +} + +// Get a reference type. + +tree +Gcc_backend::reference_type (tree to_type) +{ + if (to_type == error_mark_node) + return error_mark_node; + tree type = build_reference_type (to_type); + return type; +} + +// Get immutable type + +tree +Gcc_backend::immutable_type (tree base) +{ + if (base == error_mark_node) + return error_mark_node; + tree constified = build_qualified_type (base, TYPE_QUAL_CONST); + return constified; +} + +// Make a function type. + +tree +Gcc_backend::function_type (const typed_identifier &receiver, + const std::vector<typed_identifier> &parameters, + const std::vector<typed_identifier> &results, + tree result_struct, Location) +{ + tree args = NULL_TREE; + tree *pp = &args; + if (receiver.type != NULL_TREE) + { + tree t = receiver.type; + if (t == error_mark_node) + return error_mark_node; + *pp = tree_cons (NULL_TREE, t, NULL_TREE); + pp = &TREE_CHAIN (*pp); + } + + for (std::vector<typed_identifier>::const_iterator p = parameters.begin (); + p != parameters.end (); ++p) + { + tree t = p->type; + if (t == error_mark_node) + return error_mark_node; + *pp = tree_cons (NULL_TREE, t, NULL_TREE); + pp = &TREE_CHAIN (*pp); + } + + // Varargs is handled entirely at the Rust level. When converted to + // GENERIC functions are not varargs. + *pp = void_list_node; + + tree result; + if (results.empty ()) + result = void_type_node; + else if (results.size () == 1) + result = results.front ().type; + else + { + gcc_assert (result_struct != NULL); + result = result_struct; + } + if (result == error_mark_node) + return error_mark_node; + + // The libffi library cannot represent a zero-sized object. To + // avoid causing confusion on 32-bit SPARC, we treat a function that + // returns a zero-sized value as returning void. That should do no + // harm since there is no actual value to be returned. 
See + // https://gcc.gnu.org/PR72814 for details. + if (result != void_type_node && int_size_in_bytes (result) == 0) + result = void_type_node; + + tree fntype = build_function_type (result, args); + if (fntype == error_mark_node) + return error_mark_node; + + return build_pointer_type (fntype); +} + +tree +Gcc_backend::function_type_varadic ( + const typed_identifier &receiver, + const std::vector<typed_identifier> &parameters, + const std::vector<typed_identifier> &results, tree result_struct, Location) +{ + size_t n = parameters.size () + (receiver.type != NULL_TREE ? 1 : 0); + tree *args = XALLOCAVEC (tree, n); + size_t offs = 0; + + if (receiver.type != NULL_TREE) + { + tree t = receiver.type; + if (t == error_mark_node) + return error_mark_node; + + args[offs++] = t; + } + + for (std::vector<typed_identifier>::const_iterator p = parameters.begin (); + p != parameters.end (); ++p) + { + tree t = p->type; + if (t == error_mark_node) + return error_mark_node; + args[offs++] = t; + } + + tree result; + if (results.empty ()) + result = void_type_node; + else if (results.size () == 1) + result = results.front ().type; + else + { + gcc_assert (result_struct != NULL_TREE); + result = result_struct; + } + if (result == error_mark_node) + return error_mark_node; + + // The libffi library cannot represent a zero-sized object. To + // avoid causing confusion on 32-bit SPARC, we treat a function that + // returns a zero-sized value as returning void. That should do no + // harm since there is no actual value to be returned. See + // https://gcc.gnu.org/PR72814 for details. 
+ if (result != void_type_node && int_size_in_bytes (result) == 0) + result = void_type_node; + + tree fntype = build_varargs_function_type_array (result, n, args); + if (fntype == error_mark_node) + return error_mark_node; + + return build_pointer_type (fntype); +} + +tree +Gcc_backend::function_ptr_type (tree result_type, + const std::vector<tree> &parameters, + Location /* locus */) +{ + tree args = NULL_TREE; + tree *pp = &args; + + for (auto &param : parameters) + { + if (param == error_mark_node) + return error_mark_node; + + *pp = tree_cons (NULL_TREE, param, NULL_TREE); + pp = &TREE_CHAIN (*pp); + } + + *pp = void_list_node; + + tree result = result_type; + if (result != void_type_node && int_size_in_bytes (result) == 0) + result = void_type_node; + + tree fntype = build_function_type (result, args); + if (fntype == error_mark_node) + return error_mark_node; + + return build_pointer_type (fntype); +} + +// Make a struct type. + +tree +Gcc_backend::struct_type (const std::vector<typed_identifier> &fields) +{ + return this->fill_in_fields (make_node (RECORD_TYPE), fields); +} + +// Make a union type. + +tree +Gcc_backend::union_type (const std::vector<typed_identifier> &fields) +{ + return this->fill_in_fields (make_node (UNION_TYPE), fields); +} + +// Fill in the fields of a struct or union type. 
+ +tree +Gcc_backend::fill_in_fields (tree fill, + const std::vector<typed_identifier> &fields) +{ + tree field_trees = NULL_TREE; + tree *pp = &field_trees; + for (std::vector<typed_identifier>::const_iterator p = fields.begin (); + p != fields.end (); ++p) + { + tree name_tree = get_identifier_from_string (p->name); + tree type_tree = p->type; + if (type_tree == error_mark_node) + return error_mark_node; + tree field = build_decl (p->location.gcc_location (), FIELD_DECL, + name_tree, type_tree); + DECL_CONTEXT (field) = fill; + *pp = field; + pp = &DECL_CHAIN (field); + } + TYPE_FIELDS (fill) = field_trees; + layout_type (fill); + + // Because Rust permits converting between named struct types and + // equivalent struct types, for which we use VIEW_CONVERT_EXPR, and + // because we don't try to maintain TYPE_CANONICAL for struct types, + // we need to tell the middle-end to use structural equality. + SET_TYPE_STRUCTURAL_EQUALITY (fill); + + return fill; +} + +// Make an array type. + +tree +Gcc_backend::array_type (tree element_type, tree length) +{ + return this->fill_in_array (make_node (ARRAY_TYPE), element_type, length); +} + +// Fill in an array type. + +tree +Gcc_backend::fill_in_array (tree fill, tree element_type, tree length_tree) +{ + if (element_type == error_mark_node || length_tree == error_mark_node) + return error_mark_node; + + gcc_assert (TYPE_SIZE (element_type) != NULL_TREE); + + length_tree = fold_convert (sizetype, length_tree); + + // build_index_type takes the maximum index, which is one less than + // the length. 
+ tree index_type_tree = build_index_type ( + fold_build2 (MINUS_EXPR, sizetype, length_tree, size_one_node)); + + TREE_TYPE (fill) = element_type; + TYPE_DOMAIN (fill) = index_type_tree; + TYPE_ADDR_SPACE (fill) = TYPE_ADDR_SPACE (element_type); + layout_type (fill); + + if (TYPE_STRUCTURAL_EQUALITY_P (element_type)) + SET_TYPE_STRUCTURAL_EQUALITY (fill); + else if (TYPE_CANONICAL (element_type) != element_type + || TYPE_CANONICAL (index_type_tree) != index_type_tree) + TYPE_CANONICAL (fill) = build_array_type (TYPE_CANONICAL (element_type), + TYPE_CANONICAL (index_type_tree)); + + return fill; +} + +// Return a named version of a type. + +tree +Gcc_backend::named_type (const std::string &name, tree type, Location location) +{ + if (type == error_mark_node) + return error_mark_node; + + // The middle-end expects a basic type to have a name. In Rust every + // basic type will have a name. The first time we see a basic type, + // give it whatever Rust name we have at this point. + if (TYPE_NAME (type) == NULL_TREE + && location.gcc_location () == BUILTINS_LOCATION + && (TREE_CODE (type) == INTEGER_TYPE || TREE_CODE (type) == REAL_TYPE + || TREE_CODE (type) == COMPLEX_TYPE + || TREE_CODE (type) == BOOLEAN_TYPE)) + { + tree decl = build_decl (BUILTINS_LOCATION, TYPE_DECL, + get_identifier_from_string (name), type); + TYPE_NAME (type) = decl; + return type; + } + + tree copy = build_variant_type_copy (type); + tree decl = build_decl (location.gcc_location (), TYPE_DECL, + get_identifier_from_string (name), copy); + DECL_ORIGINAL_TYPE (decl) = type; + TYPE_NAME (copy) = decl; + return copy; +} + +// Return the size of a type. 
+ +int64_t +Gcc_backend::type_size (tree t) +{ + if (t == error_mark_node) + return 1; + if (t == void_type_node) + return 0; + t = TYPE_SIZE_UNIT (t); + gcc_assert (tree_fits_uhwi_p (t)); + unsigned HOST_WIDE_INT val_wide = TREE_INT_CST_LOW (t); + int64_t ret = static_cast (val_wide); + if (ret < 0 || static_cast (ret) != val_wide) + return -1; + return ret; +} + +// Return the alignment of a type. + +int64_t +Gcc_backend::type_alignment (tree t) +{ + if (t == error_mark_node) + return 1; + return TYPE_ALIGN_UNIT (t); +} + +// Return the alignment of a struct field of type BTYPE. + +int64_t +Gcc_backend::type_field_alignment (tree t) +{ + if (t == error_mark_node) + return 1; + return rust_field_alignment (t); +} + +// Return the offset of a field in a struct. + +int64_t +Gcc_backend::type_field_offset (tree struct_tree, size_t index) +{ + if (struct_tree == error_mark_node) + return 0; + gcc_assert (TREE_CODE (struct_tree) == RECORD_TYPE); + tree field = TYPE_FIELDS (struct_tree); + for (; index > 0; --index) + { + field = DECL_CHAIN (field); + gcc_assert (field != NULL_TREE); + } + HOST_WIDE_INT offset_wide = int_byte_position (field); + int64_t ret = static_cast (offset_wide); + gcc_assert (ret == offset_wide); + return ret; +} + +// Return the zero value for a type. + +tree +Gcc_backend::zero_expression (tree t) +{ + tree ret; + if (t == error_mark_node) + ret = error_mark_node; + else + ret = build_zero_cst (t); + return ret; +} + +// An expression that references a variable. + +tree +Gcc_backend::var_expression (Bvariable *var, Location location) +{ + return var->get_tree (location); +} + +// Return a typed value as a constant integer. + +tree +Gcc_backend::integer_constant_expression (tree t, mpz_t val) +{ + if (t == error_mark_node) + return error_mark_node; + + tree ret = double_int_to_tree (t, mpz_get_double_int (t, val, true)); + return ret; +} + +// Return a typed value as a constant floating-point number. 
+ +tree +Gcc_backend::float_constant_expression (tree t, mpfr_t val) +{ + tree ret; + if (t == error_mark_node) + return error_mark_node; + + REAL_VALUE_TYPE r1; + real_from_mpfr (&r1, val, t, GMP_RNDN); + REAL_VALUE_TYPE r2; + real_convert (&r2, TYPE_MODE (t), &r1); + ret = build_real (t, r2); + return ret; +} + +// Return a typed real and imaginary value as a constant complex number. + +tree +Gcc_backend::complex_constant_expression (tree t, mpc_t val) +{ + tree ret; + if (t == error_mark_node) + return error_mark_node; + + REAL_VALUE_TYPE r1; + real_from_mpfr (&r1, mpc_realref (val), TREE_TYPE (t), GMP_RNDN); + REAL_VALUE_TYPE r2; + real_convert (&r2, TYPE_MODE (TREE_TYPE (t)), &r1); + + REAL_VALUE_TYPE r3; + real_from_mpfr (&r3, mpc_imagref (val), TREE_TYPE (t), GMP_RNDN); + REAL_VALUE_TYPE r4; + real_convert (&r4, TYPE_MODE (TREE_TYPE (t)), &r3); + + ret = build_complex (t, build_real (TREE_TYPE (t), r2), + build_real (TREE_TYPE (t), r4)); + return ret; +} + +// Make a constant string expression. + +tree +Gcc_backend::string_constant_expression (const std::string &val) +{ + tree index_type = build_index_type (size_int (val.length ())); + tree const_char_type = build_qualified_type (char_type_node, TYPE_QUAL_CONST); + tree string_type = build_array_type (const_char_type, index_type); + TYPE_STRING_FLAG (string_type) = 1; + tree string_val = build_string (val.length (), val.data ()); + TREE_TYPE (string_val) = string_type; + + return string_val; +} + +tree +Gcc_backend::wchar_constant_expression (wchar_t c) +{ + return build_int_cst (this->wchar_type (), c); +} + +tree +Gcc_backend::char_constant_expression (char c) +{ + return build_int_cst (this->char_type (), c); +} + +// Make a constant boolean expression. + +tree +Gcc_backend::boolean_constant_expression (bool val) +{ + return val ? boolean_true_node : boolean_false_node; +} + +// Return the real part of a complex expression. 
+ +tree +Gcc_backend::real_part_expression (tree complex_tree, Location location) +{ + if (complex_tree == error_mark_node) + return error_mark_node; + gcc_assert (COMPLEX_FLOAT_TYPE_P (TREE_TYPE (complex_tree))); + tree ret + = fold_build1_loc (location.gcc_location (), REALPART_EXPR, + TREE_TYPE (TREE_TYPE (complex_tree)), complex_tree); + return ret; +} + +// Return the imaginary part of a complex expression. + +tree +Gcc_backend::imag_part_expression (tree complex_tree, Location location) +{ + if (complex_tree == error_mark_node) + return error_mark_node; + gcc_assert (COMPLEX_FLOAT_TYPE_P (TREE_TYPE (complex_tree))); + tree ret + = fold_build1_loc (location.gcc_location (), IMAGPART_EXPR, + TREE_TYPE (TREE_TYPE (complex_tree)), complex_tree); + return ret; +} + +// Make a complex expression given its real and imaginary parts. + +tree +Gcc_backend::complex_expression (tree real_tree, tree imag_tree, + Location location) +{ + if (real_tree == error_mark_node || imag_tree == error_mark_node) + return error_mark_node; + gcc_assert (TYPE_MAIN_VARIANT (TREE_TYPE (real_tree)) + == TYPE_MAIN_VARIANT (TREE_TYPE (imag_tree))); + gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (real_tree))); + tree ret = fold_build2_loc (location.gcc_location (), COMPLEX_EXPR, + build_complex_type (TREE_TYPE (real_tree)), + real_tree, imag_tree); + return ret; +} + +// An expression that converts an expression to a different type. + +tree +Gcc_backend::convert_expression (tree type_tree, tree expr_tree, + Location location) +{ + if (type_tree == error_mark_node || expr_tree == error_mark_node + || TREE_TYPE (expr_tree) == error_mark_node) + return error_mark_node; + + tree ret; + if (this->type_size (type_tree) == 0 + || TREE_TYPE (expr_tree) == void_type_node) + { + // Do not convert zero-sized types. 
+ ret = expr_tree; + } + else if (TREE_CODE (type_tree) == INTEGER_TYPE) + ret = fold (convert_to_integer (type_tree, expr_tree)); + else if (TREE_CODE (type_tree) == REAL_TYPE) + ret = fold (convert_to_real (type_tree, expr_tree)); + else if (TREE_CODE (type_tree) == COMPLEX_TYPE) + ret = fold (convert_to_complex (type_tree, expr_tree)); + else if (TREE_CODE (type_tree) == POINTER_TYPE + && TREE_CODE (TREE_TYPE (expr_tree)) == INTEGER_TYPE) + ret = fold (convert_to_pointer (type_tree, expr_tree)); + else if (TREE_CODE (type_tree) == RECORD_TYPE + || TREE_CODE (type_tree) == ARRAY_TYPE) + ret = fold_build1_loc (location.gcc_location (), VIEW_CONVERT_EXPR, + type_tree, expr_tree); + else + ret = fold_convert_loc (location.gcc_location (), type_tree, expr_tree); + + return ret; +} + +// Return an expression for the field at INDEX in BSTRUCT. + +tree +Gcc_backend::struct_field_expression (tree struct_tree, size_t index, + Location location) +{ + if (struct_tree == error_mark_node + || TREE_TYPE (struct_tree) == error_mark_node) + return error_mark_node; + gcc_assert (TREE_CODE (TREE_TYPE (struct_tree)) == RECORD_TYPE + || TREE_CODE (TREE_TYPE (struct_tree)) == UNION_TYPE); + tree field = TYPE_FIELDS (TREE_TYPE (struct_tree)); + if (field == NULL_TREE) + { + // This can happen for a type which refers to itself indirectly + // and then turns out to be erroneous. + return error_mark_node; + } + for (unsigned int i = index; i > 0; --i) + { + field = DECL_CHAIN (field); + gcc_assert (field != NULL_TREE); + } + if (TREE_TYPE (field) == error_mark_node) + return error_mark_node; + tree ret = fold_build3_loc (location.gcc_location (), COMPONENT_REF, + TREE_TYPE (field), struct_tree, field, NULL_TREE); + if (TREE_CONSTANT (struct_tree)) + TREE_CONSTANT (ret) = 1; + return ret; +} + +// Return an expression that executes BSTAT before BEXPR. 
+
+// Return an expression that evaluates STAT for its side effects and then
+// yields the value of EXPR.  The result type is EXPR's type.
+tree
+Gcc_backend::compound_expression (tree stat, tree expr, Location location)
+{
+  if (stat == error_mark_node || expr == error_mark_node)
+    return error_mark_node;
+  tree ret = fold_build2_loc (location.gcc_location (), COMPOUND_EXPR,
+			      TREE_TYPE (expr), stat, expr);
+  return ret;
+}
+
+// Return an expression that executes THEN_EXPR if CONDITION is true, or
+// ELSE_EXPR otherwise.
+
+tree
+Gcc_backend::conditional_expression (tree /* unused */, tree type_tree,
+				     tree cond_expr,
+				     tree then_expr, tree else_expr,
+				     Location location)
+{
+  // Propagate errors from any operand rather than building a broken tree.
+  if (type_tree == error_mark_node || cond_expr == error_mark_node
+      || then_expr == error_mark_node || else_expr == error_mark_node)
+    return error_mark_node;
+  tree ret = build3_loc (location.gcc_location (), COND_EXPR, type_tree,
+			 cond_expr, then_expr, else_expr);
+  return ret;
+}
+
+/* Helper function that converts rust operators to the equivalent GCC
+   tree_code.  Note that CompoundAssignmentOperator variants don't get a
+   corresponding tree_code, because they are compiled away when we lower
+   AST to HIR.  */
+static enum tree_code
+operator_to_tree_code (NegationOperator op)
+{
+  switch (op)
+    {
+    case NegationOperator::NEGATE:
+      return NEGATE_EXPR;
+    case NegationOperator::NOT:
+      return TRUTH_NOT_EXPR;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Note that GCC tree codes distinguish floating point division from integer
+   division.  These two kinds of division are represented by the same rust
+   operator, and can only be distinguished via context (i.e. the TREE_TYPE of
+   the operands).  */
+static enum tree_code
+operator_to_tree_code (ArithmeticOrLogicalOperator op, bool floating_point)
+{
+  switch (op)
+    {
+    case ArithmeticOrLogicalOperator::ADD:
+      return PLUS_EXPR;
+    case ArithmeticOrLogicalOperator::SUBTRACT:
+      return MINUS_EXPR;
+    case ArithmeticOrLogicalOperator::MULTIPLY:
+      return MULT_EXPR;
+    case ArithmeticOrLogicalOperator::DIVIDE:
+      // Real vs. truncating division is chosen from the operand type.
+      if (floating_point)
+	return RDIV_EXPR;
+      else
+	return TRUNC_DIV_EXPR;
+    case ArithmeticOrLogicalOperator::MODULUS:
+      return TRUNC_MOD_EXPR;
+    case ArithmeticOrLogicalOperator::BITWISE_AND:
+      return BIT_AND_EXPR;
+    case ArithmeticOrLogicalOperator::BITWISE_OR:
+      return BIT_IOR_EXPR;
+    case ArithmeticOrLogicalOperator::BITWISE_XOR:
+      return BIT_XOR_EXPR;
+    case ArithmeticOrLogicalOperator::LEFT_SHIFT:
+      return LSHIFT_EXPR;
+    case ArithmeticOrLogicalOperator::RIGHT_SHIFT:
+      return RSHIFT_EXPR;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+// Map a rust comparison operator onto the matching GCC comparison tree_code.
+static enum tree_code
+operator_to_tree_code (ComparisonOperator op)
+{
+  switch (op)
+    {
+    case ComparisonOperator::EQUAL:
+      return EQ_EXPR;
+    case ComparisonOperator::NOT_EQUAL:
+      return NE_EXPR;
+    case ComparisonOperator::GREATER_THAN:
+      return GT_EXPR;
+    case ComparisonOperator::LESS_THAN:
+      return LT_EXPR;
+    case ComparisonOperator::GREATER_OR_EQUAL:
+      return GE_EXPR;
+    case ComparisonOperator::LESS_OR_EQUAL:
+      return LE_EXPR;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+// Map a rust lazy boolean operator onto the short-circuiting GCC tree_code.
+static enum tree_code
+operator_to_tree_code (LazyBooleanOperator op)
+{
+  switch (op)
+    {
+    case LazyBooleanOperator::LOGICAL_OR:
+      return TRUTH_ORIF_EXPR;
+    case LazyBooleanOperator::LOGICAL_AND:
+      return TRUTH_ANDIF_EXPR;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Helper function for deciding if a tree is a floating point node.
+   Complex types count as floating point here as well.  */
+bool
+is_floating_point (tree t)
+{
+  auto tree_type = TREE_CODE (TREE_TYPE (t));
+  return tree_type == REAL_TYPE || tree_type == COMPLEX_TYPE;
+}
+
+// Return an expression for the negation operation OP EXPR.
+tree
+Gcc_backend::negation_expression (NegationOperator op, tree expr_tree,
+				   Location location)
+{
+  /* Check if the expression is an error, in which case we return an error
+     expression. */
+  if (expr_tree == error_mark_node || TREE_TYPE (expr_tree) == error_mark_node)
+    return error_mark_node;
+
+  /* For negation operators, the resulting type should be the same as its
+     operand. */
+  auto tree_type = TREE_TYPE (expr_tree);
+  auto original_type = tree_type;
+  auto tree_code = operator_to_tree_code (op);
+
+  /* For floating point operations we may need to extend the precision of type.
+     For example, a 64-bit machine may not support operations on float32. */
+  bool floating_point = is_floating_point (expr_tree);
+  auto extended_type = NULL_TREE;
+  if (floating_point)
+    {
+      extended_type = excess_precision_type (tree_type);
+      if (extended_type != NULL_TREE)
+	{
+	  expr_tree = convert (extended_type, expr_tree);
+	  tree_type = extended_type;
+	}
+    }
+
+  /* Construct a new tree and build an expression from it. */
+  auto new_tree = fold_build1_loc (location.gcc_location (), tree_code,
+				   tree_type, expr_tree);
+
+  /* Truncate back to the original precision.  This must convert NEW_TREE,
+     the negated result, not EXPR_TREE; converting the operand would
+     silently discard the negation (compare
+     arithmetic_or_logical_expression below, which converts its result).  */
+  if (floating_point && extended_type != NULL_TREE)
+    new_tree = convert (original_type, new_tree);
+  return new_tree;
+}
+
+// Return an expression for the arithmetic or logical operation LEFT OP RIGHT.
+tree
+Gcc_backend::arithmetic_or_logical_expression (ArithmeticOrLogicalOperator op,
+					       tree left_tree, tree right_tree,
+					       Location location)
+{
+  /* Check if either expression is an error, in which case we return an error
+     expression. */
+  if (left_tree == error_mark_node || right_tree == error_mark_node)
+    return error_mark_node;
+
+  /* We need to determine if we're doing floating point arithmetic or integer
+     arithmetic.  */
+  bool floating_point = is_floating_point (left_tree);
+
+  /* For arithmetic or logical operators, the resulting type should be the same
+     as the lhs operand.  */
+  auto tree_type = TREE_TYPE (left_tree);
+  auto original_type = tree_type;
+  auto tree_code = operator_to_tree_code (op, floating_point);
+
+  /* For floating point operations we may need to extend the precision of type.
+     For example, a 64-bit machine may not support operations on float32. */
+  auto extended_type = NULL_TREE;
+  if (floating_point)
+    {
+      extended_type = excess_precision_type (tree_type);
+      if (extended_type != NULL_TREE)
+	{
+	  left_tree = convert (extended_type, left_tree);
+	  right_tree = convert (extended_type, right_tree);
+	  tree_type = extended_type;
+	}
+    }
+
+  /* Construct a new tree and build an expression from it. */
+  auto new_tree = fold_build2_loc (location.gcc_location (), tree_code,
+				   tree_type, left_tree, right_tree);
+  TREE_CONSTANT (new_tree)
+    = TREE_CONSTANT (left_tree) && TREE_CONSTANT (right_tree);
+
+  if (floating_point && extended_type != NULL_TREE)
+    new_tree = convert (original_type, new_tree);
+  return new_tree;
+}
+
+// Return an expression for the comparison operation LEFT OP RIGHT.
+tree
+Gcc_backend::comparison_expression (ComparisonOperator op, tree left_tree,
+				    tree right_tree, Location location)
+{
+  /* Check if either expression is an error, in which case we return an error
+     expression. */
+  if (left_tree == error_mark_node || right_tree == error_mark_node)
+    return error_mark_node;
+
+  /* For comparison operators, the resulting type should be boolean. */
+  auto tree_type = boolean_type_node;
+  auto tree_code = operator_to_tree_code (op);
+
+  /* Construct a new tree and build an expression from it. */
+  auto new_tree = fold_build2_loc (location.gcc_location (), tree_code,
+				   tree_type, left_tree, right_tree);
+  return new_tree;
+}
+
+// Return an expression for the lazy boolean operation LEFT OP RIGHT.
+tree +Gcc_backend::lazy_boolean_expression (LazyBooleanOperator op, tree left_tree, + tree right_tree, Location location) +{ + /* Check if either expression is an error, in which case we return an error + expression. */ + if (left_tree == error_mark_node || right_tree == error_mark_node) + return error_mark_node; + + /* For lazy boolean operators, the resulting type should be the same as the + rhs operand. */ + auto tree_type = TREE_TYPE (right_tree); + auto tree_code = operator_to_tree_code (op); + + /* Construct a new tree and build an expression from it. */ + auto new_tree = fold_build2_loc (location.gcc_location (), tree_code, + tree_type, left_tree, right_tree); + return new_tree; +} + +// Return an expression that constructs BTYPE with VALS. + +tree +Gcc_backend::constructor_expression (tree type_tree, bool is_variant, + const std::vector &vals, + int union_index, Location location) +{ + if (type_tree == error_mark_node) + return error_mark_node; + + vec *init; + vec_alloc (init, vals.size ()); + + tree sink = NULL_TREE; + bool is_constant = true; + tree field = TYPE_FIELDS (type_tree); + + if (is_variant) + { + gcc_assert (union_index != -1); + gcc_assert (TREE_CODE (type_tree) == UNION_TYPE); + + for (int i = 0; i < union_index; i++) + { + gcc_assert (field != NULL_TREE); + field = DECL_CHAIN (field); + } + + tree nested_ctor + = constructor_expression (TREE_TYPE (field), false, vals, -1, location); + + constructor_elt empty = {NULL, NULL}; + constructor_elt *elt = init->quick_push (empty); + elt->index = field; + elt->value + = this->convert_tree (TREE_TYPE (field), nested_ctor, location); + if (!TREE_CONSTANT (elt->value)) + is_constant = false; + } + else + { + if (union_index != -1) + { + gcc_assert (TREE_CODE (type_tree) == UNION_TYPE); + tree val = vals.front (); + for (int i = 0; i < union_index; i++) + { + gcc_assert (field != NULL_TREE); + field = DECL_CHAIN (field); + } + if (TREE_TYPE (field) == error_mark_node || val == error_mark_node + || 
TREE_TYPE (val) == error_mark_node) + return error_mark_node; + + if (int_size_in_bytes (TREE_TYPE (field)) == 0) + { + // GIMPLE cannot represent indices of zero-sized types so + // trying to construct a map with zero-sized keys might lead + // to errors. Instead, we evaluate each expression that + // would have been added as a map element for its + // side-effects and construct an empty map. + append_to_statement_list (val, &sink); + } + else + { + constructor_elt empty = {NULL, NULL}; + constructor_elt *elt = init->quick_push (empty); + elt->index = field; + elt->value + = this->convert_tree (TREE_TYPE (field), val, location); + if (!TREE_CONSTANT (elt->value)) + is_constant = false; + } + } + else + { + gcc_assert (TREE_CODE (type_tree) == RECORD_TYPE); + for (std::vector::const_iterator p = vals.begin (); + p != vals.end (); ++p, field = DECL_CHAIN (field)) + { + gcc_assert (field != NULL_TREE); + tree val = (*p); + if (TREE_TYPE (field) == error_mark_node || val == error_mark_node + || TREE_TYPE (val) == error_mark_node) + return error_mark_node; + + if (int_size_in_bytes (TREE_TYPE (field)) == 0) + { + // GIMPLE cannot represent indices of zero-sized types so + // trying to construct a map with zero-sized keys might lead + // to errors. Instead, we evaluate each expression that + // would have been added as a map element for its + // side-effects and construct an empty map. 
+ append_to_statement_list (val, &sink); + continue; + } + + constructor_elt empty = {NULL, NULL}; + constructor_elt *elt = init->quick_push (empty); + elt->index = field; + elt->value + = this->convert_tree (TREE_TYPE (field), val, location); + if (!TREE_CONSTANT (elt->value)) + is_constant = false; + } + gcc_assert (field == NULL_TREE); + } + } + + tree ret = build_constructor (type_tree, init); + if (is_constant) + TREE_CONSTANT (ret) = 1; + if (sink != NULL_TREE) + ret = fold_build2_loc (location.gcc_location (), COMPOUND_EXPR, type_tree, + sink, ret); + return ret; +} + +tree +Gcc_backend::array_constructor_expression ( + tree type_tree, const std::vector &indexes, + const std::vector &vals, Location location) +{ + if (type_tree == error_mark_node) + return error_mark_node; + + gcc_assert (indexes.size () == vals.size ()); + + tree element_type = TREE_TYPE (type_tree); + HOST_WIDE_INT element_size = int_size_in_bytes (element_type); + vec *init; + vec_alloc (init, element_size == 0 ? 0 : vals.size ()); + + tree sink = NULL_TREE; + bool is_constant = true; + for (size_t i = 0; i < vals.size (); ++i) + { + tree index = size_int (indexes[i]); + tree val = vals[i]; + + if (index == error_mark_node || val == error_mark_node) + return error_mark_node; + + if (element_size == 0) + { + // GIMPLE cannot represent arrays of zero-sized types so trying + // to construct an array of zero-sized values might lead to errors. + // Instead, we evaluate each expression that would have been added as + // an array value for its side-effects and construct an empty array. 
+ append_to_statement_list (val, &sink); + continue; + } + + if (!TREE_CONSTANT (val)) + is_constant = false; + + constructor_elt empty = {NULL, NULL}; + constructor_elt *elt = init->quick_push (empty); + elt->index = index; + elt->value = val; + } + + tree ret = build_constructor (type_tree, init); + if (is_constant) + TREE_CONSTANT (ret) = 1; + if (sink != NULL_TREE) + ret = fold_build2_loc (location.gcc_location (), COMPOUND_EXPR, type_tree, + sink, ret); + return ret; +} + +// Build insns to create an array, initialize all elements of the array to +// value, and return it +tree +Gcc_backend::array_initializer (tree fndecl, tree block, tree array_type, + tree length, tree value, tree *tmp, + Location locus) +{ + std::vector stmts; + + // Temporary array we initialize with the desired value. + tree t = NULL_TREE; + Bvariable *tmp_array = this->temporary_variable (fndecl, block, array_type, + NULL_TREE, true, locus, &t); + tree arr = tmp_array->get_tree (locus); + stmts.push_back (t); + + // Temporary for the array length used for initialization loop guard. + Bvariable *tmp_len = this->temporary_variable (fndecl, block, size_type_node, + length, true, locus, &t); + tree len = tmp_len->get_tree (locus); + stmts.push_back (t); + + // Temporary variable for pointer used to initialize elements. 
+ tree ptr_type = this->pointer_type (TREE_TYPE (array_type)); + tree ptr_init + = build1_loc (locus.gcc_location (), ADDR_EXPR, ptr_type, + this->array_index_expression (arr, integer_zero_node, locus)); + Bvariable *tmp_ptr = this->temporary_variable (fndecl, block, ptr_type, + ptr_init, false, locus, &t); + tree ptr = tmp_ptr->get_tree (locus); + stmts.push_back (t); + + // push statement list for the loop + std::vector loop_stmts; + + // Loop exit condition: + // if (length == 0) break; + t = this->comparison_expression (ComparisonOperator::EQUAL, len, + this->zero_expression (TREE_TYPE (len)), + locus); + + t = this->exit_expression (t, locus); + loop_stmts.push_back (t); + + // Assign value to the current pointer position + // *ptr = value; + t = this->assignment_statement (build_fold_indirect_ref (ptr), value, locus); + loop_stmts.push_back (t); + + // Move pointer to next element + // ptr++; + tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptr_type)); + t = build2 (POSTINCREMENT_EXPR, ptr_type, ptr, convert (ptr_type, size)); + loop_stmts.push_back (t); + + // Decrement loop counter. + // length--; + t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (len), len, + convert (TREE_TYPE (len), integer_one_node)); + loop_stmts.push_back (t); + + // pop statments and finish loop + tree loop_body = this->statement_list (loop_stmts); + stmts.push_back (this->loop_expression (loop_body, locus)); + + // Return the temporary in the provided pointer and the statement list which + // initializes it. + *tmp = tmp_array->get_tree (locus); + return this->statement_list (stmts); +} + +// Return an expression representing ARRAY[INDEX] + +tree +Gcc_backend::array_index_expression (tree array_tree, tree index_tree, + Location location) +{ + if (array_tree == error_mark_node || TREE_TYPE (array_tree) == error_mark_node + || index_tree == error_mark_node) + return error_mark_node; + + // A function call that returns a zero sized object will have been + // changed to return void. 
If we see void here, assume we are + // dealing with a zero sized type and just evaluate the operands. + tree ret; + if (TREE_TYPE (array_tree) != void_type_node) + ret = build4_loc (location.gcc_location (), ARRAY_REF, + TREE_TYPE (TREE_TYPE (array_tree)), array_tree, + index_tree, NULL_TREE, NULL_TREE); + else + ret = fold_build2_loc (location.gcc_location (), COMPOUND_EXPR, + void_type_node, array_tree, index_tree); + + return ret; +} + +// Create an expression for a call to FN_EXPR with FN_ARGS. +tree +Gcc_backend::call_expression (tree fn, const std::vector &fn_args, + tree chain_expr, Location location) +{ + if (fn == error_mark_node || TREE_TYPE (fn) == error_mark_node) + return error_mark_node; + + gcc_assert (FUNCTION_POINTER_TYPE_P (TREE_TYPE (fn))); + tree rettype = TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))); + + size_t nargs = fn_args.size (); + tree *args = nargs == 0 ? NULL : new tree[nargs]; + for (size_t i = 0; i < nargs; ++i) + { + args[i] = fn_args.at (i); + } + + tree fndecl = fn; + if (TREE_CODE (fndecl) == ADDR_EXPR) + fndecl = TREE_OPERAND (fndecl, 0); + + // This is to support builtin math functions when using 80387 math. 
+ tree excess_type = NULL_TREE; + if (optimize && TREE_CODE (fndecl) == FUNCTION_DECL + && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL) + && DECL_IS_UNDECLARED_BUILTIN (fndecl) && nargs > 0 + && ((SCALAR_FLOAT_TYPE_P (rettype) + && SCALAR_FLOAT_TYPE_P (TREE_TYPE (args[0]))) + || (COMPLEX_FLOAT_TYPE_P (rettype) + && COMPLEX_FLOAT_TYPE_P (TREE_TYPE (args[0]))))) + { + excess_type = excess_precision_type (TREE_TYPE (args[0])); + if (excess_type != NULL_TREE) + { + tree excess_fndecl + = mathfn_built_in (excess_type, DECL_FUNCTION_CODE (fndecl)); + if (excess_fndecl == NULL_TREE) + excess_type = NULL_TREE; + else + { + fn = build_fold_addr_expr_loc (location.gcc_location (), + excess_fndecl); + for (size_t i = 0; i < nargs; ++i) + { + if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (args[i])) + || COMPLEX_FLOAT_TYPE_P (TREE_TYPE (args[i]))) + args[i] = ::convert (excess_type, args[i]); + } + } + } + } + + tree ret + = build_call_array_loc (location.gcc_location (), + excess_type != NULL_TREE ? excess_type : rettype, + fn, nargs, args); + + // check for deprecated function usage + if (fndecl && TREE_DEPRECATED (fndecl)) + { + // set up the call-site information for `warn_deprecated_use` + input_location = location.gcc_location (); + warn_deprecated_use (fndecl, NULL_TREE); + } + + if (chain_expr) + CALL_EXPR_STATIC_CHAIN (ret) = chain_expr; + + if (excess_type != NULL_TREE) + { + // Calling convert here can undo our excess precision change. + // That may or may not be a bug in convert_to_real. + ret = build1_loc (location.gcc_location (), NOP_EXPR, rettype, ret); + } + + delete[] args; + return ret; +} + +// Variable initialization. + +tree +Gcc_backend::init_statement (tree, Bvariable *var, tree init_tree) +{ + tree var_tree = var->get_decl (); + if (var_tree == error_mark_node || init_tree == error_mark_node) + return error_mark_node; + gcc_assert (TREE_CODE (var_tree) == VAR_DECL); + + // To avoid problems with GNU ld, we don't make zero-sized + // externally visible variables. 
That might lead us to doing an + // initialization of a zero-sized expression to a non-zero sized + // variable, or vice-versa. Avoid crashes by omitting the + // initializer. Such initializations don't mean anything anyhow. + if (int_size_in_bytes (TREE_TYPE (var_tree)) != 0 && init_tree != NULL_TREE + && TREE_TYPE (init_tree) != void_type_node + && int_size_in_bytes (TREE_TYPE (init_tree)) != 0) + { + DECL_INITIAL (var_tree) = init_tree; + init_tree = NULL_TREE; + } + + tree ret = build1_loc (DECL_SOURCE_LOCATION (var_tree), DECL_EXPR, + void_type_node, var_tree); + if (init_tree != NULL_TREE) + ret = build2_loc (DECL_SOURCE_LOCATION (var_tree), COMPOUND_EXPR, + void_type_node, init_tree, ret); + + return ret; +} + +// Assignment. + +tree +Gcc_backend::assignment_statement (tree lhs, tree rhs, Location location) +{ + if (lhs == error_mark_node || rhs == error_mark_node) + return error_mark_node; + + // To avoid problems with GNU ld, we don't make zero-sized + // externally visible variables. That might lead us to doing an + // assignment of a zero-sized expression to a non-zero sized + // expression; avoid crashes here by avoiding assignments of + // zero-sized expressions. Such assignments don't really mean + // anything anyhow. + if (TREE_TYPE (lhs) == void_type_node + || int_size_in_bytes (TREE_TYPE (lhs)) == 0 + || TREE_TYPE (rhs) == void_type_node + || int_size_in_bytes (TREE_TYPE (rhs)) == 0) + return this->compound_statement (lhs, rhs); + + rhs = this->convert_tree (TREE_TYPE (lhs), rhs, location); + + return fold_build2_loc (location.gcc_location (), MODIFY_EXPR, void_type_node, + lhs, rhs); +} + +// Return. 
+ +tree +Gcc_backend::return_statement (tree fntree, const std::vector &vals, + Location location) +{ + if (fntree == error_mark_node) + return error_mark_node; + tree result = DECL_RESULT (fntree); + if (result == error_mark_node) + return error_mark_node; + + // If the result size is zero bytes, we have set the function type + // to have a result type of void, so don't return anything. + // See the function_type method. + tree res_type = TREE_TYPE (result); + if (res_type == void_type_node || int_size_in_bytes (res_type) == 0) + { + tree stmt_list = NULL_TREE; + for (std::vector::const_iterator p = vals.begin (); + p != vals.end (); p++) + { + tree val = (*p); + if (val == error_mark_node) + return error_mark_node; + append_to_statement_list (val, &stmt_list); + } + tree ret = fold_build1_loc (location.gcc_location (), RETURN_EXPR, + void_type_node, NULL_TREE); + append_to_statement_list (ret, &stmt_list); + return stmt_list; + } + + tree ret; + if (vals.empty ()) + ret = fold_build1_loc (location.gcc_location (), RETURN_EXPR, + void_type_node, NULL_TREE); + else if (vals.size () == 1) + { + tree val = vals.front (); + if (val == error_mark_node) + return error_mark_node; + tree set = fold_build2_loc (location.gcc_location (), MODIFY_EXPR, + void_type_node, result, vals.front ()); + ret = fold_build1_loc (location.gcc_location (), RETURN_EXPR, + void_type_node, set); + } + else + { + // To return multiple values, copy the values into a temporary + // variable of the right structure type, and then assign the + // temporary variable to the DECL_RESULT in the return + // statement. 
+ tree stmt_list = NULL_TREE; + tree rettype = TREE_TYPE (result); + + if (DECL_STRUCT_FUNCTION (fntree) == NULL) + push_struct_function (fntree); + else + push_cfun (DECL_STRUCT_FUNCTION (fntree)); + tree rettmp = create_tmp_var (rettype, "RESULT"); + pop_cfun (); + + tree field = TYPE_FIELDS (rettype); + for (std::vector::const_iterator p = vals.begin (); + p != vals.end (); p++, field = DECL_CHAIN (field)) + { + gcc_assert (field != NULL_TREE); + tree ref + = fold_build3_loc (location.gcc_location (), COMPONENT_REF, + TREE_TYPE (field), rettmp, field, NULL_TREE); + tree val = (*p); + if (val == error_mark_node) + return error_mark_node; + tree set = fold_build2_loc (location.gcc_location (), MODIFY_EXPR, + void_type_node, ref, (*p)); + append_to_statement_list (set, &stmt_list); + } + gcc_assert (field == NULL_TREE); + tree set = fold_build2_loc (location.gcc_location (), MODIFY_EXPR, + void_type_node, result, rettmp); + tree ret_expr = fold_build1_loc (location.gcc_location (), RETURN_EXPR, + void_type_node, set); + append_to_statement_list (ret_expr, &stmt_list); + ret = stmt_list; + } + return ret; +} + +// Create a statement that attempts to execute BSTAT and calls EXCEPT_STMT if an +// error occurs. EXCEPT_STMT may be NULL. FINALLY_STMT may be NULL and if not +// NULL, it will always be executed. This is used for handling defers in Rust +// functions. 
In C++, the resulting code is of this form: +// try { BSTAT; } catch { EXCEPT_STMT; } finally { FINALLY_STMT; } + +tree +Gcc_backend::exception_handler_statement (tree try_stmt, tree except_stmt, + tree finally_stmt, Location location) +{ + if (try_stmt == error_mark_node || except_stmt == error_mark_node + || finally_stmt == error_mark_node) + return error_mark_node; + + if (except_stmt != NULL_TREE) + try_stmt = build2_loc (location.gcc_location (), TRY_CATCH_EXPR, + void_type_node, try_stmt, + build2_loc (location.gcc_location (), CATCH_EXPR, + void_type_node, NULL, except_stmt)); + if (finally_stmt != NULL_TREE) + try_stmt = build2_loc (location.gcc_location (), TRY_FINALLY_EXPR, + void_type_node, try_stmt, finally_stmt); + return try_stmt; +} + +// If. + +tree +Gcc_backend::if_statement (tree, tree cond_tree, tree then_tree, tree else_tree, + Location location) +{ + if (cond_tree == error_mark_node || then_tree == error_mark_node + || else_tree == error_mark_node) + return error_mark_node; + tree ret = build3_loc (location.gcc_location (), COND_EXPR, void_type_node, + cond_tree, then_tree, else_tree); + return ret; +} + +// Loops + +tree +Gcc_backend::loop_expression (tree body, Location locus) +{ + return fold_build1_loc (locus.gcc_location (), LOOP_EXPR, void_type_node, + body); +} + +tree +Gcc_backend::exit_expression (tree cond_tree, Location locus) +{ + return fold_build1_loc (locus.gcc_location (), EXIT_EXPR, void_type_node, + cond_tree); +} + +// Pair of statements. + +tree +Gcc_backend::compound_statement (tree s1, tree s2) +{ + tree stmt_list = NULL_TREE; + tree t = s1; + if (t == error_mark_node) + return error_mark_node; + append_to_statement_list (t, &stmt_list); + t = s2; + if (t == error_mark_node) + return error_mark_node; + append_to_statement_list (t, &stmt_list); + + // If neither statement has any side effects, stmt_list can be NULL + // at this point. 
+ if (stmt_list == NULL_TREE) + stmt_list = integer_zero_node; + + return stmt_list; +} + +// List of statements. + +tree +Gcc_backend::statement_list (const std::vector &statements) +{ + tree stmt_list = NULL_TREE; + for (std::vector::const_iterator p = statements.begin (); + p != statements.end (); ++p) + { + tree t = (*p); + if (t == error_mark_node) + return error_mark_node; + append_to_statement_list (t, &stmt_list); + } + return stmt_list; +} + +// Make a block. For some reason gcc uses a dual structure for +// blocks: BLOCK tree nodes and BIND_EXPR tree nodes. Since the +// BIND_EXPR node points to the BLOCK node, we store the BIND_EXPR in +// the Bblock. + +tree +Gcc_backend::block (tree fndecl, tree enclosing, + const std::vector &vars, + Location start_location, Location) +{ + tree block_tree = make_node (BLOCK); + if (enclosing == NULL) + { + gcc_assert (fndecl != NULL_TREE); + + // We may have already created a block for local variables when + // we take the address of a parameter. 
+ if (DECL_INITIAL (fndecl) == NULL_TREE) + { + BLOCK_SUPERCONTEXT (block_tree) = fndecl; + DECL_INITIAL (fndecl) = block_tree; + } + else + { + tree superblock_tree = DECL_INITIAL (fndecl); + BLOCK_SUPERCONTEXT (block_tree) = superblock_tree; + tree *pp; + for (pp = &BLOCK_SUBBLOCKS (superblock_tree); *pp != NULL_TREE; + pp = &BLOCK_CHAIN (*pp)) + ; + *pp = block_tree; + } + } + else + { + tree superblock_tree = BIND_EXPR_BLOCK (enclosing); + gcc_assert (TREE_CODE (superblock_tree) == BLOCK); + + BLOCK_SUPERCONTEXT (block_tree) = superblock_tree; + tree *pp; + for (pp = &BLOCK_SUBBLOCKS (superblock_tree); *pp != NULL_TREE; + pp = &BLOCK_CHAIN (*pp)) + ; + *pp = block_tree; + } + + tree *pp = &BLOCK_VARS (block_tree); + for (std::vector::const_iterator pv = vars.begin (); + pv != vars.end (); ++pv) + { + *pp = (*pv)->get_decl (); + if (*pp != error_mark_node) + pp = &DECL_CHAIN (*pp); + } + *pp = NULL_TREE; + + TREE_USED (block_tree) = 1; + + tree bind_tree + = build3_loc (start_location.gcc_location (), BIND_EXPR, void_type_node, + BLOCK_VARS (block_tree), NULL_TREE, block_tree); + TREE_SIDE_EFFECTS (bind_tree) = 1; + return bind_tree; +} + +// Add statements to a block. + +void +Gcc_backend::block_add_statements (tree bind_tree, + const std::vector &statements) +{ + tree stmt_list = NULL_TREE; + for (std::vector::const_iterator p = statements.begin (); + p != statements.end (); ++p) + { + tree s = (*p); + if (s != error_mark_node) + append_to_statement_list (s, &stmt_list); + } + + gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR); + BIND_EXPR_BODY (bind_tree) = stmt_list; +} + +// This is not static because we declare it with GTY(()) in rust-c.h. +tree rust_non_zero_struct; + +// Return a type corresponding to TYPE with non-zero size. 
+ +tree +Gcc_backend::non_zero_size_type (tree type) +{ + if (int_size_in_bytes (type) != 0) + return type; + + switch (TREE_CODE (type)) + { + case RECORD_TYPE: + if (TYPE_FIELDS (type) != NULL_TREE) + { + tree ns = make_node (RECORD_TYPE); + tree field_trees = NULL_TREE; + tree *pp = &field_trees; + for (tree field = TYPE_FIELDS (type); field != NULL_TREE; + field = DECL_CHAIN (field)) + { + tree ft = TREE_TYPE (field); + if (field == TYPE_FIELDS (type)) + ft = non_zero_size_type (ft); + tree f = build_decl (DECL_SOURCE_LOCATION (field), FIELD_DECL, + DECL_NAME (field), ft); + DECL_CONTEXT (f) = ns; + *pp = f; + pp = &DECL_CHAIN (f); + } + TYPE_FIELDS (ns) = field_trees; + layout_type (ns); + return ns; + } + + if (rust_non_zero_struct == NULL_TREE) + { + type = make_node (RECORD_TYPE); + tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, + get_identifier ("dummy"), boolean_type_node); + DECL_CONTEXT (field) = type; + TYPE_FIELDS (type) = field; + layout_type (type); + rust_non_zero_struct = type; + } + return rust_non_zero_struct; + + case ARRAY_TYPE: { + tree element_type = non_zero_size_type (TREE_TYPE (type)); + return build_array_type_nelts (element_type, 1); + } + + default: + gcc_unreachable (); + } + + gcc_unreachable (); +} + +// Convert EXPR_TREE to TYPE_TREE. Sometimes the same unnamed Rust type +// can be created multiple times and thus have multiple tree +// representations. Make sure this does not confuse the middle-end. 
+
+// Convert EXPR_TREE to TYPE_TREE.  Sometimes the same unnamed Rust type
+// can be created multiple times and thus have multiple tree
+// representations.  Make sure this does not confuse the middle-end.
+tree
+Gcc_backend::convert_tree (tree type_tree, tree expr_tree, Location location)
+{
+  // Identical type: nothing to convert.
+  if (type_tree == TREE_TYPE (expr_tree))
+    return expr_tree;
+
+  if (type_tree == error_mark_node || expr_tree == error_mark_node
+      || TREE_TYPE (expr_tree) == error_mark_node)
+    return error_mark_node;
+
+  if (POINTER_TYPE_P (type_tree) || INTEGRAL_TYPE_P (type_tree)
+      || SCALAR_FLOAT_TYPE_P (type_tree) || COMPLEX_FLOAT_TYPE_P (type_tree))
+    return fold_convert_loc (location.gcc_location (), type_tree, expr_tree);
+  else if (TREE_CODE (type_tree) == RECORD_TYPE
+	   || TREE_CODE (type_tree) == UNION_TYPE
+	   || TREE_CODE (type_tree) == ARRAY_TYPE)
+    {
+      // Aggregates may only be reinterpreted when the sizes agree; a NOP is
+      // enough when only the tree identity (main variant) matches, otherwise
+      // reinterpret the bits with a VIEW_CONVERT_EXPR.
+      gcc_assert (int_size_in_bytes (type_tree)
+		  == int_size_in_bytes (TREE_TYPE (expr_tree)));
+      if (TYPE_MAIN_VARIANT (type_tree)
+	  == TYPE_MAIN_VARIANT (TREE_TYPE (expr_tree)))
+	return fold_build1_loc (location.gcc_location (), NOP_EXPR, type_tree,
+				expr_tree);
+      return fold_build1_loc (location.gcc_location (), VIEW_CONVERT_EXPR,
+			      type_tree, expr_tree);
+    }
+
+  gcc_unreachable ();
+}
+
+// Make a global variable.
+
+Bvariable *
+Gcc_backend::global_variable (const std::string &var_name,
+			      const std::string &asm_name, tree type_tree,
+			      bool is_external, bool is_hidden,
+			      bool in_unique_section, Location location)
+{
+  if (type_tree == error_mark_node)
+    return this->error_variable ();
+
+  // The GNU linker does not like dynamic variables with zero size.
+  // Substitute a non-zero-sized type, but remember the original type
+  // so the Bvariable still reports it.
+  tree orig_type_tree = type_tree;
+  if ((is_external || !is_hidden) && int_size_in_bytes (type_tree) == 0)
+    type_tree = this->non_zero_size_type (type_tree);
+
+  tree decl = build_decl (location.gcc_location (), VAR_DECL,
+			  get_identifier_from_string (var_name), type_tree);
+  if (is_external)
+    DECL_EXTERNAL (decl) = 1;
+  else
+    TREE_STATIC (decl) = 1;
+
+  // Both the hidden and non-hidden paths set the assembler name the same
+  // way, so do it once; only visibility differs.
+  if (!is_hidden)
+    TREE_PUBLIC (decl) = 1;
+  SET_DECL_ASSEMBLER_NAME (decl, get_identifier_from_string (asm_name));
+
+  TREE_USED (decl) = 1;
+
+  if (in_unique_section)
+    resolve_unique_section (decl, 0, 1);
+
+  rust_preserve_from_gc (decl);
+
+  return new Bvariable (decl, orig_type_tree);
+}
+
+// Set the initial value of a global variable.
+
+void
+Gcc_backend::global_variable_set_init (Bvariable *var, tree expr_tree)
+{
+  if (expr_tree == error_mark_node)
+    return;
+  gcc_assert (TREE_CONSTANT (expr_tree));
+  tree var_decl = var->get_decl ();
+  if (var_decl == error_mark_node)
+    return;
+  DECL_INITIAL (var_decl) = expr_tree;
+
+  // If this variable goes in a unique section, it may need to go into
+  // a different one now that DECL_INITIAL is set.
+  if (symtab_node::get (var_decl)
+      && symtab_node::get (var_decl)->implicit_section)
+    {
+      set_decl_section_name (var_decl, (const char *) NULL);
+      resolve_unique_section (var_decl, compute_reloc_for_constant (expr_tree),
+			      1);
+    }
+}
+
+// Make a local variable.
+ +Bvariable * +Gcc_backend::local_variable (tree function, const std::string &name, + tree type_tree, Bvariable *decl_var, + Location location) +{ + if (type_tree == error_mark_node) + return this->error_variable (); + tree decl = build_decl (location.gcc_location (), VAR_DECL, + get_identifier_from_string (name), type_tree); + DECL_CONTEXT (decl) = function; + + if (decl_var != NULL) + { + DECL_HAS_VALUE_EXPR_P (decl) = 1; + SET_DECL_VALUE_EXPR (decl, decl_var->get_decl ()); + } + rust_preserve_from_gc (decl); + return new Bvariable (decl); +} + +// Make a function parameter variable. + +Bvariable * +Gcc_backend::parameter_variable (tree function, const std::string &name, + tree type_tree, Location location) +{ + if (type_tree == error_mark_node) + return this->error_variable (); + tree decl = build_decl (location.gcc_location (), PARM_DECL, + get_identifier_from_string (name), type_tree); + DECL_CONTEXT (decl) = function; + DECL_ARG_TYPE (decl) = type_tree; + + rust_preserve_from_gc (decl); + return new Bvariable (decl); +} + +// Make a static chain variable. + +Bvariable * +Gcc_backend::static_chain_variable (tree fndecl, const std::string &name, + tree type_tree, Location location) +{ + if (type_tree == error_mark_node) + return this->error_variable (); + tree decl = build_decl (location.gcc_location (), PARM_DECL, + get_identifier_from_string (name), type_tree); + DECL_CONTEXT (decl) = fndecl; + DECL_ARG_TYPE (decl) = type_tree; + TREE_USED (decl) = 1; + DECL_ARTIFICIAL (decl) = 1; + DECL_IGNORED_P (decl) = 1; + TREE_READONLY (decl) = 1; + + struct function *f = DECL_STRUCT_FUNCTION (fndecl); + if (f == NULL) + { + push_struct_function (fndecl); + pop_cfun (); + f = DECL_STRUCT_FUNCTION (fndecl); + } + gcc_assert (f->static_chain_decl == NULL); + f->static_chain_decl = decl; + DECL_STATIC_CHAIN (fndecl) = 1; + + rust_preserve_from_gc (decl); + return new Bvariable (decl); +} + +// Make a temporary variable. 

// Create a temporary variable of TYPE_TREE in FNDECL, optionally
// initialized from INIT_TREE.  *PSTATEMENT receives the statement that
// declares (and, via DECL_INITIAL, initializes) the temporary.

Bvariable *
Gcc_backend::temporary_variable (tree fndecl, tree bind_tree, tree type_tree,
				 tree init_tree, bool is_address_taken,
				 Location location, tree *pstatement)
{
  gcc_assert (fndecl != NULL_TREE);
  if (type_tree == error_mark_node || init_tree == error_mark_node
      || fndecl == error_mark_node)
    {
      *pstatement = error_mark_node;
      return this->error_variable ();
    }

  tree var;
  // We can only use create_tmp_var if the type is not addressable.
  if (!TREE_ADDRESSABLE (type_tree))
    {
      // create_tmp_var works on the current function, so temporarily
      // make FNDECL's struct function current, then restore.
      if (DECL_STRUCT_FUNCTION (fndecl) == NULL)
	push_struct_function (fndecl);
      else
	push_cfun (DECL_STRUCT_FUNCTION (fndecl));

      var = create_tmp_var (type_tree, "RUSTTMP");
      pop_cfun ();
    }
  else
    {
      // Addressable type: build the VAR_DECL by hand and register it in
      // the enclosing scope ourselves.
      gcc_assert (bind_tree != NULL_TREE);
      var = build_decl (location.gcc_location (), VAR_DECL,
			create_tmp_var_name ("RUSTTMP"), type_tree);
      DECL_ARTIFICIAL (var) = 1;
      DECL_IGNORED_P (var) = 1;
      TREE_USED (var) = 1;
      DECL_CONTEXT (var) = fndecl;

      // We have to add this variable to the BLOCK and the BIND_EXPR.
      gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
      tree block_tree = BIND_EXPR_BLOCK (bind_tree);
      gcc_assert (TREE_CODE (block_tree) == BLOCK);
      // Prepend to the BLOCK's variable chain and mirror it on the
      // BIND_EXPR, which keeps its own copy of the list.
      DECL_CHAIN (var) = BLOCK_VARS (block_tree);
      BLOCK_VARS (block_tree) = var;
      BIND_EXPR_VARS (bind_tree) = BLOCK_VARS (block_tree);
    }

  // Attach the (converted) initializer unless the type is zero-sized or
  // the initializer yields no value.
  if (this->type_size (type_tree) != 0 && init_tree != NULL_TREE
      && TREE_TYPE (init_tree) != void_type_node)
    DECL_INITIAL (var) = this->convert_tree (type_tree, init_tree, location);

  if (is_address_taken)
    TREE_ADDRESSABLE (var) = 1;

  *pstatement
    = build1_loc (location.gcc_location (), DECL_EXPR, void_type_node, var);

  // For a zero sized type, don't initialize VAR with BINIT, but still
  // evaluate BINIT for its side effects.
  if (init_tree != NULL_TREE
      && (this->type_size (type_tree) == 0
	  || TREE_TYPE (init_tree) == void_type_node))
    *pstatement = this->compound_statement (init_tree, *pstatement);

  return new Bvariable (var);
}

// Make a label.  An empty NAME produces an artificial (compiler
// generated) label.

tree
Gcc_backend::label (tree func_tree, const std::string &name, Location location)
{
  tree decl;
  if (name.empty ())
    {
      // create_artificial_label uses the current function, so push
      // FUNC_TREE's struct function around the call.
      if (DECL_STRUCT_FUNCTION (func_tree) == NULL)
	push_struct_function (func_tree);
      else
	push_cfun (DECL_STRUCT_FUNCTION (func_tree));

      decl = create_artificial_label (location.gcc_location ());

      pop_cfun ();
    }
  else
    {
      tree id = get_identifier_from_string (name);
      decl
	= build_decl (location.gcc_location (), LABEL_DECL, id, void_type_node);
      DECL_CONTEXT (decl) = func_tree;
    }
  return decl;
}

// Make a statement which defines a label.

tree
Gcc_backend::label_definition_statement (tree label)
{
  return fold_build1_loc (DECL_SOURCE_LOCATION (label), LABEL_EXPR,
			  void_type_node, label);
}

// Make a goto statement.

tree
Gcc_backend::goto_statement (tree label, Location location)
{
  return fold_build1_loc (location.gcc_location (), GOTO_EXPR, void_type_node,
			  label);
}

// Get the address of a label, as a ptr_type_node value.

tree
Gcc_backend::label_address (tree label, Location location)
{
  // Taking the address forces the label to be kept and made addressable.
  TREE_USED (label) = 1;
  TREE_ADDRESSABLE (label) = 1;
  tree ret
    = fold_convert_loc (location.gcc_location (), ptr_type_node,
			build_fold_addr_expr_loc (location.gcc_location (),
						  label));
  return ret;
}

// Declare or define a new function.
+ +tree +Gcc_backend::function (tree functype, const std::string &name, + const std::string &asm_name, unsigned int flags, + Location location) +{ + if (functype != error_mark_node) + { + gcc_assert (FUNCTION_POINTER_TYPE_P (functype)); + functype = TREE_TYPE (functype); + } + tree id = get_identifier_from_string (name); + if (functype == error_mark_node || id == error_mark_node) + return error_mark_node; + + tree decl + = build_decl (location.gcc_location (), FUNCTION_DECL, id, functype); + if (!asm_name.empty ()) + SET_DECL_ASSEMBLER_NAME (decl, get_identifier_from_string (asm_name)); + + if ((flags & function_is_declaration) != 0) + DECL_EXTERNAL (decl) = 1; + else + { + tree restype = TREE_TYPE (functype); + tree resdecl = build_decl (location.gcc_location (), RESULT_DECL, + NULL_TREE, restype); + DECL_ARTIFICIAL (resdecl) = 1; + DECL_IGNORED_P (resdecl) = 1; + DECL_CONTEXT (resdecl) = decl; + DECL_RESULT (decl) = resdecl; + } + if ((flags & function_is_uninlinable) != 0) + DECL_UNINLINABLE (decl) = 1; + if ((flags & function_does_not_return) != 0) + TREE_THIS_VOLATILE (decl) = 1; + if ((flags & function_in_unique_section) != 0) + resolve_unique_section (decl, 0, 1); + + rust_preserve_from_gc (decl); + return decl; +} + +// Create a statement that runs all deferred calls for FUNCTION. 
This should +// be a statement that looks like this in C++: +// finish: +// try { UNDEFER; } catch { CHECK_DEFER; goto finish; } + +tree +Gcc_backend::function_defer_statement (tree function, tree undefer_tree, + tree defer_tree, Location location) +{ + if (undefer_tree == error_mark_node || defer_tree == error_mark_node + || function == error_mark_node) + return error_mark_node; + + if (DECL_STRUCT_FUNCTION (function) == NULL) + push_struct_function (function); + else + push_cfun (DECL_STRUCT_FUNCTION (function)); + + tree stmt_list = NULL; + tree label = this->label (function, "", location); + tree label_def = this->label_definition_statement (label); + append_to_statement_list (label_def, &stmt_list); + + tree jump_stmt = this->goto_statement (label, location); + tree catch_body + = build2 (COMPOUND_EXPR, void_type_node, defer_tree, jump_stmt); + catch_body = build2 (CATCH_EXPR, void_type_node, NULL, catch_body); + tree try_catch + = build2 (TRY_CATCH_EXPR, void_type_node, undefer_tree, catch_body); + append_to_statement_list (try_catch, &stmt_list); + pop_cfun (); + + return stmt_list; +} + +// Record PARAM_VARS as the variables to use for the parameters of FUNCTION. +// This will only be called for a function definition. + +bool +Gcc_backend::function_set_parameters ( + tree function, const std::vector ¶m_vars) +{ + if (function == error_mark_node) + return false; + + tree params = NULL_TREE; + tree *pp = ¶ms; + for (std::vector::const_iterator pv = param_vars.begin (); + pv != param_vars.end (); ++pv) + { + *pp = (*pv)->get_decl (); + gcc_assert (*pp != error_mark_node); + pp = &DECL_CHAIN (*pp); + } + *pp = NULL_TREE; + DECL_ARGUMENTS (function) = params; + return true; +} + +// Write the definitions for all TYPE_DECLS, CONSTANT_DECLS, +// FUNCTION_DECLS, and VARIABLE_DECLS declared globally, as well as +// emit early debugging information. 
+ +void +Gcc_backend::write_global_definitions ( + const std::vector &type_decls, const std::vector &constant_decls, + const std::vector &function_decls, + const std::vector &variable_decls) +{ + size_t count_definitions = type_decls.size () + constant_decls.size () + + function_decls.size () + variable_decls.size (); + + tree *defs = new tree[count_definitions]; + + // Convert all non-erroneous declarations into Gimple form. + size_t i = 0; + for (std::vector::const_iterator p = variable_decls.begin (); + p != variable_decls.end (); ++p) + { + tree v = (*p)->get_decl (); + if (v != error_mark_node) + { + defs[i] = v; + rust_preserve_from_gc (defs[i]); + ++i; + } + } + + for (std::vector::const_iterator p = type_decls.begin (); + p != type_decls.end (); ++p) + { + tree type_tree = (*p); + if (type_tree != error_mark_node && IS_TYPE_OR_DECL_P (type_tree)) + { + defs[i] = TYPE_NAME (type_tree); + gcc_assert (defs[i] != NULL); + rust_preserve_from_gc (defs[i]); + ++i; + } + } + for (std::vector::const_iterator p = constant_decls.begin (); + p != constant_decls.end (); ++p) + { + if ((*p) != error_mark_node) + { + defs[i] = (*p); + rust_preserve_from_gc (defs[i]); + ++i; + } + } + for (std::vector::const_iterator p = function_decls.begin (); + p != function_decls.end (); ++p) + { + tree decl = (*p); + if (decl != error_mark_node) + { + rust_preserve_from_gc (decl); + if (DECL_STRUCT_FUNCTION (decl) == NULL) + allocate_struct_function (decl, false); + dump_function (TDI_original, decl); + cgraph_node::finalize_function (decl, true); + + defs[i] = decl; + ++i; + } + } + + // Pass everything back to the middle-end. + + wrapup_global_declarations (defs, i); + + delete[] defs; +} + +void +Gcc_backend::write_export_data (const char *bytes, unsigned int size) +{ + rust_write_export_data (bytes, size); +} + +// Return the backend generator. + +Backend * +rust_get_backend () +{ + return new Gcc_backend (); +} -- 2.25.1