diff --git a/raltool/.gitignore b/raltool/.gitignore
new file mode 100644
index 000000000000..26a07695f3dc
--- /dev/null
+++ b/raltool/.gitignore
@@ -0,0 +1,4 @@
+[._]*.sw[a-p]
+*.org
+*.rs.bk
+target
\ No newline at end of file
diff --git a/raltool/Cargo.lock b/raltool/Cargo.lock
new file mode 100644
index 000000000000..58d38336771c
--- /dev/null
+++ b/raltool/Cargo.lock
@@ -0,0 +1,533 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "3.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8c93436c21e4698bacadf42917db28b23017027a4deccb35dbe47a7e7840123"
+dependencies = [
+ "atty",
+ "bitflags",
+ "clap_derive",
+ "indexmap",
+ "lazy_static",
+ "os_str_bytes",
+ "strsim",
+ "termcolor",
+ "textwrap",
+]
+
+[[package]]
+name = "clap_derive"
+version = "3.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da95d038ede1a964ce99f49cbe27a7fb538d1da595e4b4f70b8c8f338d17bf16"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdbfe11fe19ff083c48923cf179540e8cd0535903dc35e178a1fdeeb59aef51f"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"
+dependencies = [
+ "cfg-if",
+ "lazy_static",
+]
+
+[[package]]
+name = "either"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+
+[[package]]
+name = "env_logger"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
+dependencies = [
+ "atty",
+ "humantime",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+
+[[package]]
+name = "heck"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "indexmap"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "inflections"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a257582fdcde896fd96463bf2d40eefea0580021c0712a0e2b028b60b47a837a"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.120"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09"
+
+[[package]]
+name = "linked-hash-map"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3"
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "raltool"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "clap",
+ "env_logger",
+ "inflections",
+ "log",
+ "proc-macro2",
+ "quote",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_yaml",
+ "svd-parser",
+]
+
+[[package]]
+name = "rayon"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
+dependencies = [
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "lazy_static",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex"
+version = "1.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+
+[[package]]
+name = "ryu"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "serde"
+version = "1.0.136"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.136"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.8.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0"
+dependencies = [
+ "indexmap",
+ "ryu",
+ "serde",
+ "yaml-rust",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "svd-parser"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "697e7645ad9f5311fe3d872d094b135627b1616aea9e1573dddd28ca522579b9"
+dependencies = [
+ "anyhow",
+ "once_cell",
+ "rayon",
+ "regex",
+ "thiserror",
+ "xmltree",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.88"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebd69e719f31e88618baa1eaa6ee2de5c9a1c004f1e9ecdb58e8352a13f20a01"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
+
+[[package]]
+name = "thiserror"
+version = "1.0.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "xml-rs"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c1cb601d29fe2c2ac60a2b2e5e293994d87a1f6fa9687a31a15270f909be9c2"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "xmltree"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff8eaee9d17062850f1e6163b509947969242990ee59a35801af437abe041e70"
+dependencies = [
+ "xml-rs",
+]
+
+[[package]]
+name = "yaml-rust"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
+dependencies = [
+ "linked-hash-map",
+]
diff --git a/raltool/Cargo.toml b/raltool/Cargo.toml
new file mode 100644
index 000000000000..8dfc00fc1e49
--- /dev/null
+++ b/raltool/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "raltool"
+license = "MIT OR Apache-2.0"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+clap = { version = "3.1.6", features = ["derive"] }
+env_logger = "0.9.0"
+inflections = "1.1"
+log = { version = "~0.4", features = ["std"] }
+quote = "1.0"
+proc-macro2 = "1.0"
+anyhow = "1.0.19"
+rayon = "1.5"
+regex = "1.4.3"
+serde = { version = "1.0.123", features = [ "derive" ]}
+serde_yaml = "0.8.15"
+svd-parser = { version = "0.10.2", features = ["derive-from"] }
diff --git a/raltool/LICENSE-APACHE b/raltool/LICENSE-APACHE
new file mode 100644
index 000000000000..16fe87b06e80
--- /dev/null
+++ b/raltool/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/raltool/LICENSE-MIT b/raltool/LICENSE-MIT
new file mode 100644
index 000000000000..bdc2e8383836
--- /dev/null
+++ b/raltool/LICENSE-MIT
@@ -0,0 +1,27 @@
+Copyright (c) 2016 Jorge Aparicio
+Copyright (c) 2021 Embassy project contributors
+Copyright (c) 2022 imxrt-rs project contributors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/raltool/README.md b/raltool/README.md
new file mode 100644
index 000000000000..fdf7c58d453b
--- /dev/null
+++ b/raltool/README.md
@@ -0,0 +1,117 @@
+raltool
+=======
+
+`raltool` is a fork of [`chiptool`][], which is itself a fork of [`svd2rust`][].
+The register access layer (RAL) generated by `raltool` strives to compile faster
+than anything generated by `chiptool` or `svd2rust`, supporting the needs of
+Rust projects that work with very large SVDs.
+
+`raltool` is an experiment to support the [`imxrt-ral`][] project. The
+tool's interface and features do not vary much from [`chiptool`][], so see
+that project's documentation for more information. However, the
+`raltool`-generated code is different than both `chiptool`- and
+`svd2rust`-generated code. For more information on the `raltool`-generated API,
+see the [`imxrt-ral`][] project.
+
+ [`chiptool`]: https://github.com/embassy-rs/chiptool
+ [`svd2rust`]: https://github.com/rust-embedded/svd2rust
+ [`imxrt-ral`]: https://github.com/imxrt-rs/imxrt-ral
+
+Benchmarks
+----------
+
+`svd2rust`, `chiptool`, and `raltool` ingested a patched SVD for an i.MX RT 1062
+MCU and generated a Rust crate. The table below shows the `cargo build` build
+times for each crate. Each build succeeded without warnings.
+
+| Codegen tool | Build time |
+|:------------------------|:-----------|
+| `svd2rust` | 1m 30s |
+| `chiptool` 1 | 47s |
+| **`raltool`** | **9s** |
+
+<sup>1</sup> Lightly modified the generated crate to suppress warnings.
+
+`svd2rust` version 0.24.1. `chiptool` built at revision `73b33d9`. rustc 1.63.0.
+
+`chiptool` enhancements
+-----------------------
+
+ ┌───────┐ IR_A ┌───────────┐ IR_A' ┏━━━━━━━━━┓
+ SVD_A -> │ Parse │ ---> │ Transform │ ----> ┃ ┃
+ └───────┘ └───────────┘ ┃ ┃
+ ┌───────┐ IR_B ┌───────────┐ IR_B' ┃ ┃ IR ┌─────────┐
+ SVD_B -> │ Parse │ ---> │ Transform │ ----> ┃ Combine ┃ -> │ Codegen │ -> RAL Crate
+ └───────┘ └───────────┘ ┃ ┃ └─────────┘
+ ┌───────┐ IR_C ┌───────────┐ IR_C' ┃ ┃
+ SVD_C -> │ Parse │ ---> │ Transform │ ----> ┃ ┃
+ └───────┘ └───────────┘ ┗━━━━━━━━━┛
+
+`raltool` changes more than just the generated code. `raltool` accepts multiple
+SVD files, and introduces a new "combine" phase to the process. The
+combine phase consolidates blocks, fieldsets, and enums *across* devices. The
+combine pass runs after the transform pass(es), consuming one or more `chiptool`
+intermediate representations (IR) to create a new IR. This single, combined IR
+represents all the peripherals, blocks, fieldsets, and enums for all devices.
+The codegen phase generates modules to represent that combined IR.
+
+In practice, the combine phase can automatically reduce `N` UART peripherals
+from `N` SVDs down to 1 UART peripheral. This works even if the input SVDs have
+different names for blocks, fieldsets, or enums.
+
+The combine phase is conservative, and it won't combine device elements if
+they don't seem equivalent. If you know that blocks, fieldsets, or enums
+should be equivalent, you can use transforms to coax the IR into a combine-able
+form, or you can patch your SVD(s).
+
+Limitations
+-----------
+
+Aliased registers are not supported. To control codegen, use a transform to
+remove aliased registers / fields. The codegen phase will select an arbitrary
+register alias for codegen, though it prefers an alias that's both
+read-write.
+
+Aliased cluster arrays are also not supported. The recommendation is to either
+remove the peripherals, or describe the peripherals differently, using
+`chiptool` YAML. Surprisingly, aliased cluster arrays appear in practice; see
+the i.MX RT 11xx SVDs.
+
+The transform phase(s) run before the combine phase, not after. It's
+trivial to introduce a transform phase on the combined IR, but there
+hasn't been a need for this.
+
+`raltool` simply generates a tree of Rust source files. It does not generate a
+`Cargo.toml` to define the package. You're responsible for defining this
+manifest, adding all dependencies, and defining the features expected in the
+generated code. `cargo init` and `cargo add` can help with this:
+
+``` bash
+cd path/to/output
+
+cargo init
+cargo add cortex-m ral-registers
+
+# Edit Cargo.toml, add feature flags for devices.
+# See lib.rs for expected features.
+```
+
+License
+-------
+
+Licensed under either of
+
+- Apache License, Version 2.0 ([LICENSE-APACHE][] or
+ http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT][] or http://opensource.org/licenses/MIT)
+
+at your option.
+
+ [LICENSE-APACHE]: LICENSE-APACHE
+ [LICENSE-MIT]: LICENSE-MIT
+
+**Contribution**
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/raltool/src/combine.rs b/raltool/src/combine.rs
new file mode 100644
index 000000000000..810618dcf922
--- /dev/null
+++ b/raltool/src/combine.rs
@@ -0,0 +1,441 @@
+//! Helper types to combine and consolidate IRs across devices.
+
+use crate::ir;
+use std::{
+ cmp::Ordering,
+ collections::{hash_map::Entry, HashMap},
+};
+
/// An element version.
///
/// Pairs one concrete IR element with every IR that shares this exact
/// (equivalence-checked) definition of it.
pub struct Version<'ir, E> {
    /// Reference to the element.
    elem: &'ir E,
    /// The IRs that use this version.
    irs: Vec<&'ir ir::IR>,
}

impl<'ir, E> Version<'ir, E> {
    // Start a new version used by exactly one IR; `irs` is never empty.
    fn new(elem: &'ir E, ir: &'ir ir::IR) -> Self {
        Self {
            elem,
            irs: vec![ir],
        }
    }

    /// Acquire the IR element.
    pub fn element(&self) -> &'ir E {
        self.elem
    }

    /// Returns `true` if the provided IR uses this element version.
    ///
    /// This uses a pointer comparison to understand if the IRs are equal.
    /// It does not use any (Partial)Eq trait.
    pub fn is_used_by(&self, ir: &ir::IR) -> bool {
        self.irs.iter().any(|jr| std::ptr::eq(ir, *jr))
    }
}
+
+/// A version of an enum.
+pub type EnumVersion<'ir> = Version<'ir, ir::Enum>;
+/// A version of a field set.
+pub type FieldSetVersion<'ir> = Version<'ir, ir::FieldSet>;
+/// A version of a block.
+pub type BlockVersion<'ir> = Version<'ir, ir::Block>;
+
+/// Multiple versions of some element.
+type Versions<'ir, E> = Vec>;
+
+/// Used to sort versions by most popular (most IR associations) to least
+/// popular (fewest IR associations).
+fn popularity(a: &Version<'_, E>, b: &Version<'_, E>) -> Ordering {
+ b.irs.len().cmp(&a.irs.len())
+}
+
+#[derive(Clone, Copy)]
+struct CompareIr<'ir, E> {
+ elem: &'ir E,
+ ir: &'ir ir::IR,
+}
+
+impl<'ir, E> CompareIr<'ir, E> {
+ fn from_version(version: &Version<'ir, E>) -> Self {
+ Self::new(
+ version.element(),
+ version.irs.first().expect("Versions always have an IR"),
+ )
+ }
+ fn new(elem: &'ir E, ir: &'ir ir::IR) -> Self {
+ Self { elem, ir }
+ }
+ fn query(ir: &'ir ir::IR, query: impl FnOnce(&'ir ir::IR) -> Option<&'ir E>) -> Option {
+ query(ir).map(|elem| Self::new(elem, ir))
+ }
+}
+
+/// Assert two elements as equivalent.
+///
+/// The implementation invokes this callback for similarly-named things
+/// across IRs. For instance, the input will always be two UART
+/// blocks from two different devices. You'll never see an UART and an
+/// I2C block being compared for equivalence (unless your IR is really
+/// messed up).
+type Equivalence = fn(CompareIr, CompareIr) -> bool;
+
/// Ensure the items in two, possibly non-sorted contiguous
/// collections are equivalent.
///
/// Order-insensitive: lengths must match, and every element of `xs`
/// must have at least one `equiv` partner in `ys`. NOTE(review): with
/// duplicated elements this is weaker than a multiset comparison
/// (e.g. `[a, a, b]` vs. `[a, b, b]` passes); acceptable here since
/// it's only used as an equivalence heuristic.
fn equivalent_slices<E>(xs: &[E], ys: &[E], equiv: impl Fn(&E, &E) -> bool) -> bool {
    xs.len() == ys.len() && xs.iter().all(|x| ys.iter().any(|y| equiv(x, y)))
}
+
+fn equivalent_options(
+ a: Option>,
+ b: Option>,
+ equiv: Equivalence,
+) -> bool {
+ match (a, b) {
+ (Some(a), Some(b)) => equiv(a, b),
+ (None, None) => true,
+ (_, _) => false,
+ }
+}
+
+/// Check if two enums are equivalent.
+fn equivalent_enum(
+ CompareIr { elem: a, .. }: CompareIr,
+ CompareIr { elem: b, .. }: CompareIr,
+) -> bool {
+ a.bit_size == b.bit_size
+ && equivalent_slices(&a.variants, &b.variants, |q, r| q.value == r.value)
+}
+
+/// Check if two fieldsets are equivalent.
+fn equivalent_fieldsets(
+ CompareIr { elem: a, ir: air }: CompareIr,
+ CompareIr { elem: b, ir: bir }: CompareIr,
+) -> bool {
+ let try_equivalent_enum = |a: &Option, b: &Option| -> bool {
+ let a = a
+ .as_ref()
+ .and_then(|a| CompareIr::query(air, |ir| ir.enums.get(a)));
+ let b = b
+ .as_ref()
+ .and_then(|b| CompareIr::query(bir, |ir| ir.enums.get(b)));
+ equivalent_options(a, b, equivalent_enum)
+ };
+
+ a.bit_size == b.bit_size
+ && equivalent_slices(&a.fields, &b.fields, |q, r| {
+ q.bit_offset == r.bit_offset
+ && q.array == r.array
+ && q.bit_size == r.bit_size
+ && try_equivalent_enum(&q.enum_read, &r.enum_read)
+ && try_equivalent_enum(&q.enum_write, &r.enum_write)
+ && try_equivalent_enum(&q.enum_readwrite, &r.enum_readwrite)
+ })
+}
+
+fn equivalent_registers(
+ CompareIr { elem: a, ir: air }: CompareIr,
+ CompareIr { elem: b, ir: bir }: CompareIr,
+) -> bool {
+ let query_builder =
+ |ir| move |fieldset: &String| CompareIr::query(ir, |ir| ir.fieldsets.get(fieldset));
+
+ a.access == b.access
+ && a.bit_size == b.bit_size
+ && equivalent_options(
+ a.fieldset.as_ref().and_then(query_builder(air)),
+ b.fieldset.as_ref().and_then(query_builder(bir)),
+ equivalent_fieldsets,
+ )
+}
+
+/// Check if two blocks are equivalent.
+fn equivalent_blocks(
+ CompareIr { elem: a, ir: air }: CompareIr,
+ CompareIr { elem: b, ir: bir }: CompareIr,
+) -> bool {
+ a.extends == b.extends
+ && equivalent_slices(&a.items, &b.items, |q, r| {
+ q.byte_offset == r.byte_offset
+ && q.array == r.array
+ && match (&q.inner, &r.inner) {
+ (
+ ir::BlockItemInner::Block(ir::BlockItemBlock { block: ablock }),
+ ir::BlockItemInner::Block(ir::BlockItemBlock { block: bblock }),
+ ) => equivalent_blocks(
+ CompareIr::query(air, |ir| ir.blocks.get(ablock)).unwrap(),
+ CompareIr::query(bir, |ir| ir.blocks.get(bblock)).unwrap(),
+ ),
+ (
+ ir::BlockItemInner::Register(aregister),
+ ir::BlockItemInner::Register(bregister),
+ ) => equivalent_registers(
+ CompareIr::new(aregister, air),
+ CompareIr::new(bregister, bir),
+ ),
+ _ => false,
+ }
+ })
+}
+
+/// Manages versions for an IR element type.
+struct VersionLookup<'ir, E> {
+ versions: HashMap<&'ir str, Versions<'ir, E>>,
+}
+
+impl<'ir, E> VersionLookup<'ir, E> {
+ /// Create new version lookups for an IR's elements.
+ fn new(
+ equiv: Equivalence,
+ map: impl Iterator- ,
+ ) -> Self {
+ let versions = map.fold(
+ HashMap::new(),
+ |mut versions: HashMap<&'ir str, Versions<'ir, E>>, (ir, path, elem)| {
+ versions
+ .entry(path.as_str())
+ .and_modify(|versions| {
+ if let Some(version) = versions.iter_mut().find(|version| {
+ (equiv)(CompareIr::from_version(version), CompareIr::new(elem, ir))
+ }) {
+ version.irs.push(ir);
+ } else {
+ versions.push(Version::new(elem, ir))
+ }
+ })
+ .or_insert_with(|| vec![Version::new(elem, ir)])
+ .sort_unstable_by(popularity);
+ versions
+ },
+ );
+ Self { versions }
+ }
+
+ fn from_irs(
+ equiv: Equivalence,
+ irs: &'ir [ir::IR],
+ access: impl Fn(&'ir ir::IR) -> &HashMap,
+ ) -> Self {
+ let map = irs
+ .iter()
+ .flat_map(|ir| std::iter::repeat(ir).zip(access(ir).iter()))
+ .map(|(ir, (path, elem))| (ir, path, elem));
+ Self::new(equiv, map)
+ }
+
+ fn get(&self, ir: &ir::IR, path: &str) -> Option<&Version> {
+ self.versions
+ .get(path)
+ .and_then(|versions| versions.iter().find(|version| version.is_used_by(ir)))
+ }
+}
+
/// Manages versions of IR elements.
///
/// The implementation uses the address of the IR when querying for versioned elements.
/// This should be fine, since the implementation takes shared references to the IR, so
/// things can't (safely) move or be reassigned while this exists.
pub struct IrVersions<'ir> {
    enums: VersionLookup<'ir, ir::Enum>,
    fieldsets: VersionLookup<'ir, ir::FieldSet>,
    blocks: VersionLookup<'ir, ir::Block>,
}

impl<'ir> IrVersions<'ir> {
    /// Define versions of IR elements from the collection of IRs.
    pub fn from_irs(irs: &'ir [ir::IR]) -> Self {
        Self {
            enums: VersionLookup::from_irs(equivalent_enum, irs, |ir| &ir.enums),
            fieldsets: VersionLookup::from_irs(equivalent_fieldsets, irs, |ir| &ir.fieldsets),
            blocks: VersionLookup::from_irs(equivalent_blocks, irs, |ir| &ir.blocks),
        }
    }
    /// Access an enum version that corresponds to this IR.
    ///
    /// Returns `None` if `path` is unknown, or if `ir` (compared by
    /// address) wasn't one of the IRs supplied to `from_irs`.
    pub fn get_enum(&self, ir: &ir::IR, path: &str) -> Option<&EnumVersion> {
        self.enums.get(ir, path)
    }
    /// Access a fieldset version that corresponds to this IR.
    /// Same lookup semantics as `get_enum`.
    pub fn get_fieldset(&self, ir: &ir::IR, path: &str) -> Option<&FieldSetVersion> {
        self.fieldsets.get(ir, path)
    }
    /// Access a block version that corresponds to this IR.
    /// Same lookup semantics as `get_enum`.
    pub fn get_block(&self, ir: &ir::IR, path: &str) -> Option<&BlockVersion> {
        self.blocks.get(ir, path)
    }
}
+
/// Hashing a reference by its address.
///
/// Identity (pointer) semantics: two `RefHash`es are equal iff they
/// wrap the exact same referent address. `Hash`/`Eq`/`Clone`/`Copy`
/// are implemented manually so that no bounds are required on `T`.
struct RefHash<'a, T>(&'a T);

impl<T> std::hash::Hash for RefHash<'_, T> {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        std::ptr::hash(self.0, state);
    }
}

impl<T> std::cmp::PartialEq for RefHash<'_, T> {
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self.0, other.0)
    }
}

impl<T> std::cmp::Eq for RefHash<'_, T> {}

impl<T> Clone for RefHash<'_, T> {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}

impl<T> Copy for RefHash<'_, T> {}

/// A map keyed on referent address.
type RefMap<'a, K, V> = HashMap<RefHash<'a, K>, V>;
+
+/// Combine all IRs into a single IR.
+pub fn combine(irs: &[ir::IR]) -> ir::IR {
+ assert!(
+ irs.iter().all(|ir| !ir.devices.is_empty()),
+ "Cannot combine an IR with empty devices."
+ );
+ assert!(
+ irs.iter().all(|ir| ir.devices.len() == 1),
+ "Sorry, not ready to combine IRs that were already combined"
+ );
+ {
+ let device_names: Vec<_> = irs
+ .iter()
+ .map(|ir| ir.devices.keys().next().unwrap())
+ .collect();
+ assert!(
+ device_names.len() == irs.len(),
+ "Each IR must describe a unique device."
+ );
+ assert!(
+ device_names.iter().all(|name| !name.is_empty()),
+ "Each device needs a name."
+ );
+ }
+
+ let versions = IrVersions::from_irs(irs);
+
+ let mut consolidated = ir::IR::new();
+
+ // Combine enums.
+ let mut enums: RefMap = RefMap::new();
+ for ir in irs {
+ let device_name = ir.devices.keys().next().expect("Each IR has a name");
+
+ for path in ir.enums.keys() {
+ let version = versions
+ .get_enum(ir, path)
+ .expect("There's definitely a version");
+
+ if let Entry::Vacant(entry) = enums.entry(RefHash(version.element())) {
+ let path = format!("{device_name}::{path}");
+ entry.insert(path.clone());
+ consolidated.enums.insert(path, version.element().clone());
+ }
+ }
+ }
+
+ // Combine fieldsets.
+ let mut fieldsets: RefMap = RefMap::new();
+ for ir in irs {
+ let device_name = ir.devices.keys().next().unwrap();
+
+ for path in ir.fieldsets.keys() {
+ let version = versions.get_fieldset(ir, path).unwrap();
+
+ if let Entry::Vacant(entry) = fieldsets.entry(RefHash(version.element())) {
+ let path = format!("{device_name}::{path}");
+ entry.insert(path.clone());
+
+ let mut fieldset = version.element().clone();
+ // Fix references to enums by looking up the version, then mapping it to
+ // the updated path.
+ for field in &mut fieldset.fields {
+ for name in [
+ field.enum_readwrite.as_mut(),
+ field.enum_read.as_mut(),
+ field.enum_write.as_mut(),
+ ]
+ .into_iter()
+ .flatten()
+ {
+ let version = versions.get_enum(ir, name).unwrap();
+ *name = enums.get(&RefHash(version.element())).unwrap().into();
+ }
+ }
+ consolidated.fieldsets.insert(path, fieldset);
+ }
+ }
+ }
+
+ // Combine blocks.
+ //
+ // Block consolidation uses two passes, since a block
+ // can have a reference to another block. The first pass
+ // manages the version -> rename mapping, and the second
+ // pass does the touch-up.
+ let mut blocks: RefMap = RefMap::new();
+ for ir in irs {
+ let device_name = ir.devices.keys().next().unwrap();
+
+ for path in ir.blocks.keys() {
+ let version = versions.get_block(ir, path).unwrap();
+
+ if let Entry::Vacant(entry) = blocks.entry(RefHash(version.element())) {
+ let path = format!("{device_name}::{path}");
+ entry.insert(path.clone());
+ consolidated.blocks.insert(path, version.element().clone());
+ }
+ }
+ }
+
+ let blocks = blocks;
+ // Remove from this to ensure patches only happens once.
+ let mut filter = blocks.clone();
+ for ir in irs {
+ for path in ir.blocks.keys() {
+ let version = versions.get_block(ir, path).unwrap();
+
+ if let Some(path) = filter.get(&RefHash(version.element())) {
+ let block = consolidated.blocks.get_mut(path).unwrap();
+ for item in &mut block.items {
+ match &mut item.inner {
+ ir::BlockItemInner::Register(reg) => {
+ for fieldset in &mut reg.fieldset {
+ let version = versions.get_fieldset(ir, fieldset).unwrap();
+ *fieldset =
+ fieldsets.get(&RefHash(version.element())).unwrap().into()
+ }
+ }
+ ir::BlockItemInner::Block(ir::BlockItemBlock { block }) => {
+ let version = versions.get_block(ir, block).unwrap();
+ *block = blocks.get(&RefHash(version.element())).unwrap().into();
+ }
+ }
+ }
+ }
+ filter.remove(&RefHash(version.element()));
+ }
+ }
+
+ // Update all devices to point to new blocks.
+ for ir in irs {
+ let mut devices = ir.devices.clone();
+ devices
+ .values_mut()
+ .flat_map(|device| device.peripherals.iter_mut())
+ .flat_map(|peripheral| &mut peripheral.block)
+ .for_each(|name: &mut String| {
+ let version = versions.get_block(ir, name).unwrap();
+ *name = blocks.get(&RefHash(version.element())).unwrap().into();
+ });
+ consolidated.devices.extend(devices);
+ }
+
+ consolidated
+}
diff --git a/raltool/src/generate/block.rs b/raltool/src/generate/block.rs
new file mode 100644
index 000000000000..72d44103810e
--- /dev/null
+++ b/raltool/src/generate/block.rs
@@ -0,0 +1,556 @@
+//! Generates a register block, along with submodules for register fields.
+//!
+//! Recursively expands dependent blocks that are part of the module. This
+//! means that the input to [`render`] is expected to be a root block, or
+//! a block that is not a sub-block of another block.
+
+use std::num::NonZeroUsize;
+
+use anyhow::Result;
+use proc_macro2::TokenStream;
+use proc_macro2::{Ident, Span};
+use quote::quote;
+
+use crate::ir;
+use crate::util;
+
+/// A primitive size for a (reserved) register.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[repr(usize)]
+enum Size {
+ U8 = 8,
+ U16 = 16,
+ U32 = 32,
+ U64 = 64,
+}
+
+impl Size {
+ const fn bits(self) -> usize {
+ self as usize
+ }
+ const fn bytes(self) -> usize {
+ self.bits() / 8
+ }
+ fn type_token(self) -> TokenStream {
+ match self {
+ Self::U8 => quote!(u8),
+ Self::U16 => quote!(u16),
+ Self::U32 => quote!(u32),
+ Self::U64 => quote!(u64),
+ }
+ }
+ fn from_bit_size(bit_size: ir::BitSize) -> Self {
+ match bit_size {
+ ir::BitSize(8) => Self::U8,
+ ir::BitSize(16) => Self::U16,
+ ir::BitSize(32) => Self::U32,
+ ir::BitSize(64) => Self::U64,
+ ir::BitSize(invalid) => panic!("Invalid register bit size {invalid}"),
+ }
+ }
+}
+
/// A register block.
///
/// Any stride necessary to meet the requirements of a cluster array
/// are implicitly expressed with a final reservation member at the back
/// of the members collection. Meaning: the stride and size of the block
/// are equal.
///
/// A single `Block` allocation holds the layout for all of its dependent
/// sub-blocks. To find sub-block layouts, scan the `members` for a block,
/// and recurse.
#[derive(Debug)]
struct Block<'a> {
    /// Module name.
    ///
    /// The lowercased final component of the block's IR path; used as
    /// the generated module identifier.
    module: String,
    /// Type documentation
    doc: Option<&'a str>,
    /// Members.
    ///
    /// If a block requires sub-blocks, they're contained within
    /// this collection. Kept sorted by offset, with reservations
    /// filling any gaps (see `layout_members`).
    members: Members<'a>,
}
+
+/// Produces a collection of block members with reservations.
+///
+/// This is where struct layout happens.
+fn layout_members<'ir>(
+ items: &'ir [ir::BlockItem],
+ ir: &'ir ir::IR,
+ reservation_id: &mut usize,
+) -> Members<'ir> {
+ let mut registers: Vec<_> = items
+ .iter()
+ .flat_map(|item| Member::expand(item, ir))
+ .collect();
+
+ // Order by their location in the block.
+ //
+ // If registers are at the same location, prefer the alias that
+ // is read-write.
+ registers.sort_by(|left, right| {
+ let offsets = left.offset().cmp(&right.offset());
+ match (left, right) {
+ (Member::Register(left), Member::Register(right)) => {
+ offsets.then(left.access.cmp(&right.access))
+ }
+ _ => offsets,
+ }
+ });
+
+ // Drop aliasing registers.
+ registers.dedup_by(|left, right| left.offset() == right.offset());
+
+ // Insert reservations.
+ let mut members: Vec = Vec::new();
+ for register in registers {
+ let offset = members
+ .last()
+ .map(|mem| mem.offset() + mem.size_bytes())
+ .unwrap_or(0usize);
+ if offset != register.offset() {
+ assert!(register.offset() > offset);
+ members.push(Member::Reserved {
+ len: register.offset() - offset,
+ offset,
+ id: *reservation_id,
+ });
+ *reservation_id += 1;
+ }
+ members.push(register)
+ }
+
+ members
+}
+
+/// Sanity check of the block layout.
+///
+/// Panics if there's an issue.
+#[cfg(debug_assertions)]
+fn check_layout(members: &[Member], path: &str) {
+ // Expand registers, modeling each as a range.
+ type RegRange = std::ops::Range;
+
+ fn recurse(members: &[Member], registers: &mut Vec, global_offset: usize) {
+ for member in members {
+ match member {
+ Member::Register(reg) => {
+ for idx in 0..reg.len {
+ registers.push(RegRange {
+ start: global_offset + reg.offset + idx * reg.size.bytes(),
+ end: global_offset + reg.offset + (idx + 1) * reg.size.bytes(),
+ })
+ }
+ }
+ Member::Reserved { len, offset, .. } => registers.push(RegRange {
+ start: global_offset + *offset,
+ end: global_offset + *offset + *len,
+ }),
+ Member::Block {
+ block, len, offset, ..
+ } => {
+ for idx in 0..*len {
+ recurse(
+ &block.members,
+ registers,
+ global_offset + *offset + idx * block.size_bytes(),
+ );
+ }
+ }
+ }
+ }
+ }
+
+ let mut registers: Vec = Vec::new();
+ recurse(members, &mut registers, 0);
+
+ for (idx, reg) in registers.iter().enumerate() {
+ for (jdx, seg) in registers.iter().enumerate() {
+ if idx != jdx {
+ for r in reg.clone() {
+ if seg.contains(&r) {
+ panic!(
+ r#"{members:#?}
+There's an issue in the '{path}' block layout(s).
+This routine flattens registers from blocks, ensuring
+that there's no register overlap. If you're reading this
+panic message, it's because there's likely overlap.
+The questionable block is printed above this message.
+Evaluate its layout, and compare it with the SVD.
+"#
+ );
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'ir> Block<'ir> {
+ /// Allocate a new block.
+ ///
+ /// `path` is the IR path, and `block` is the associated block. A stride
+ /// of `None` prevents the routine from inserting any padding at the back
+ /// of the block to meet a cluster stride.
+ fn new(
+ path: &'ir str,
+ block: &'ir ir::Block,
+ ir: &'ir ir::IR,
+ stride: Option,
+ ) -> Self {
+ let module = path.split("::").last().unwrap().to_lowercase();
+ let mut reservation_id = 0usize;
+ let members = layout_members(&block.items, ir, &mut reservation_id);
+
+ let mut block = Self {
+ module,
+ doc: block.description.as_ref().map(String::as_ref),
+ members,
+ };
+
+ // Jam some padding in the back to meet the stride.
+ if let Some(stride) = stride {
+ let size = block.size_bytes();
+ assert!(
+ stride.get() >= size,
+ "Expecting that we need to insert padding or do nothing, but it seems we need to take it away...?"
+ );
+
+ let padding_bytes = stride.get() - size;
+ if padding_bytes > 0 {
+ block.members.push(Member::Reserved {
+ id: reservation_id,
+ len: padding_bytes,
+ offset: block
+ .members
+ .last()
+ .map(|mem| mem.offset() + mem.size_bytes())
+ .unwrap_or(0usize),
+ });
+ }
+ }
+
+ #[cfg(debug_assertions)]
+ check_layout(&block.members, path);
+
+ block
+ }
+}
+
+impl Block<'_> {
+ fn size_bytes(&self) -> usize {
+ self.members.iter().map(|mem| mem.size_bytes()).sum()
+ }
+ fn render_into(&self, tokens: &mut TokenStream) {
+ let members = self
+ .members
+ .iter()
+ .fold(TokenStream::new(), |mut tokens, member| {
+ member.render_into(&mut tokens);
+ tokens
+ });
+ let doc = util::doc(&self.doc.map(ToString::to_string));
+ tokens.extend(quote! {
+ #doc
+ #[repr(C)]
+ pub struct RegisterBlock {
+ #members
+ }
+ });
+ }
+ fn subblocks(&self) -> impl Iterator
- {
+ self.members.iter().filter_map(|mem| match mem {
+ Member::Block { block, .. } => Some(block),
+ _ => None,
+ })
+ }
+ fn registers(&self) -> impl Iterator
- {
+ self.members.iter().filter_map(|mem| match mem {
+ Member::Register(reg) => Some(reg),
+ _ => None,
+ })
+ }
+}
+
/// A register (array).
#[derive(Debug)]
struct Register<'a> {
    /// Register name.
    ///
    /// Expands to the struct member name.
    name: String,
    /// Size of the register.
    size: Size,
    /// How many registers?
    ///
    /// If `len` is one, the implementation emits a scalar (non-array)
    /// type.
    len: usize,
    /// Optional documentation.
    doc: Option<&'a str>,
    /// Access.
    ///
    /// Selects between the RO/WO/RW register wrapper types.
    access: ir::Access,
    /// Key to the associated fieldset.
    fieldset: Option<&'a str>,
    /// Offset of this register within the block.
    offset: usize,
}
+
+/// A struct member.
+#[derive(Debug)]
+enum Member<'a> {
+ /// A useful register.
+ Register(Register<'a>),
+ /// A reserved register.
+ ///
+ /// Always a byte array with some `len` of bytes.
+ Reserved {
+ /// Arbitrary ID for the reserved register.
+ ///
+ /// Assigned when specifying the block. Only
+ /// used to generate a unique identifier for the
+ /// member name.
+ id: usize,
+ /// How many bytes to reserve.
+ len: usize,
+ /// Byte position in the block.
+ offset: usize,
+ },
+ /// A cluster, or another register subblock.
+ Block {
+ /// Register layout for the block.
+ block: Block<'a>,
+ /// How many subblocks? Always greater than zero.
+ ///
+ /// If one, the implementation emits a single struct
+ /// instead of an array.
+ len: usize,
+ /// The member name.
+ ///
+ /// Differs from the module name; this expands to
+ /// the struct member identifier.
+ name: String,
+ /// The member documentation.
+ ///
+ /// Differs from the type documentation.
+ doc: Option<&'a str>,
+ /// Offset of this block within the parent block.
+ offset: usize,
+ },
+}
+type Members<'a> = Vec>;
+
+impl<'ir> Member<'ir> {
+ /// Expands a block into one or more members.
+ ///
+ /// The returned collection is never empty.
+ fn expand(block_item: &'ir ir::BlockItem, ir: &'ir ir::IR) -> Vec {
+ let name = block_item.name.as_str();
+ let offset = block_item.byte_offset as usize;
+ let doc = block_item.description.as_deref();
+
+ match (&block_item.array, &block_item.inner) {
+ // Individual register.
+ (None, ir::BlockItemInner::Register(reg)) => {
+ vec![Self::Register(Register {
+ name: name.into(),
+ size: Size::from_bit_size(reg.bit_size),
+ len: 1,
+ doc,
+ access: reg.access.clone(),
+ fieldset: reg.fieldset.as_deref(),
+ offset,
+ })]
+ }
+ // Array of registers with contiguous allocation.
+ (
+ Some(ir::Array::Regular(ir::RegularArray { len, stride })),
+ ir::BlockItemInner::Register(reg),
+ ) if ir::BitSize(*stride * 8) == reg.bit_size => {
+ vec![Self::Register(Register {
+ name: name.into(),
+ size: Size::from_bit_size(reg.bit_size),
+ len: *len as usize,
+ doc,
+ access: reg.access.clone(),
+ fieldset: reg.fieldset.as_deref(),
+ offset,
+ })]
+ }
+ // "Array" of registers, but they're not contiguous. Describe them as
+ // individual registers.
+ (
+ Some(ir::Array::Regular(ir::RegularArray { len, stride })),
+ ir::BlockItemInner::Register(reg),
+ ) => (0..*len as usize)
+ .map(|idx| {
+ Self::Register(Register {
+ name: format!("{name}{idx}"),
+ size: Size::from_bit_size(reg.bit_size),
+ len: 1,
+ doc,
+ access: reg.access.clone(),
+ fieldset: reg.fieldset.as_deref(),
+ offset: offset + idx * *stride as usize,
+ })
+ })
+ .collect(),
+ // A cluster.
+ (
+ Some(ir::Array::Regular(ir::RegularArray { len, stride })),
+ ir::BlockItemInner::Block(ir::BlockItemBlock { block }),
+ ) => vec![Self::Block {
+ block: Block::new(
+ block,
+ ir.blocks.get(block).unwrap(),
+ ir,
+ NonZeroUsize::new(*stride as usize),
+ ),
+ len: *len as usize,
+ name: name.to_lowercase(),
+ doc,
+ offset,
+ }],
+ (Some(ir::Array::Cursed(_)), _) => {
+ panic!("Not yet handling a cursed array. I'd rather not spread the curse.");
+ }
+ (None, ir::BlockItemInner::Block(_)) => {
+ panic!("Unexpected cluster without a stride");
+ }
+ }
+ }
+}
+
impl Member<'_> {
    /// Returns the size of the member allocation.
    fn size_bytes(&self) -> usize {
        match self {
            Self::Register(Register { size, len, .. }) => size.bytes() * len,
            Self::Reserved { len, .. } => Size::U8.bytes() * len,
            Self::Block { block, len, .. } => block.size_bytes() * *len,
        }
    }

    /// Returns the byte offset of this member within its parent block.
    fn offset(&self) -> usize {
        match self {
            Self::Register(Register { offset, .. }) => *offset,
            Self::Block { offset, .. } => *offset,
            Self::Reserved { offset, .. } => *offset,
        }
    }

    /// Render this member into a token stream.
    ///
    /// This does not render a sub-block; it only inserts a member name and type
    /// for the parent block (self).
    fn render_into(&self, tokens: &mut TokenStream) {
        match self {
            Self::Reserved { id, len, .. } => {
                assert!(*len > 0, "There's at least one reservation");
                // Reservations are always emitted as byte arrays.
                let ty = Size::U8.type_token();
                let reservation = quote::format_ident!("_reserved{}", *id);
                let len = util::hex(*len as u64);
                tokens.extend(quote! {
                    #reservation: [#ty; #len],
                });
            }
            Self::Register(Register {
                name,
                size,
                len,
                doc,
                access,
                ..
            }) => {
                assert!(*len > 0, "There's at least one register");
                // Access determines the register wrapper type.
                let register = match access {
                    ir::Access::Read => quote!(crate::RORegister),
                    ir::Access::Write => quote!(crate::WORegister),
                    ir::Access::ReadWrite => quote!(crate::RWRegister),
                };
                let reg_ty = size.type_token();
                // Scalar type for a single register, array otherwise.
                let ty = if *len == 1 {
                    quote!(#register<#reg_ty>)
                } else {
                    quote!([#register<#reg_ty>; #len])
                };
                let span = Span::call_site();
                let name = Ident::new(name, span);
                let doc = util::doc(&doc.map(ToString::to_string));
                tokens.extend(quote! {
                    #doc
                    pub #name: #ty,
                })
            }
            Self::Block {
                len,
                name,
                doc,
                block,
                ..
            } => {
                assert!(*len > 0, "There's at least one block");
                let span = Span::call_site();
                let scalar = Ident::new(&block.module, span);
                // Scalar type for a single sub-block, array otherwise.
                let ty = if *len == 1 {
                    quote!(#scalar::RegisterBlock)
                } else {
                    quote!([#scalar::RegisterBlock; #len])
                };
                let doc = util::doc(&doc.map(ToString::to_string));
                // The member identifier is the uppercased member name.
                let field = Ident::new(&name.to_uppercase(), span);
                tokens.extend(quote! {
                    #doc
                    pub #field: #ty,
                })
            }
        }
    }
}
+
+/// Renders a block module, recursing to render all sub-block modules.
+fn render_module(block: &Block, ir: &ir::IR) -> Result {
+ let mut tokens = TokenStream::new();
+ block.render_into(&mut tokens);
+ block.registers().try_for_each(|reg| -> Result<()> {
+ if let Some(fieldset) = reg
+ .fieldset
+ .as_ref()
+ .and_then(|fieldset| ir.fieldsets.get(*fieldset))
+ {
+ let span = Span::call_site();
+ let name = Ident::new(®.name, span);
+ let doc = util::doc(®.doc.map(ToString::to_string));
+ let field_modules = super::fieldset::render(ir, fieldset)?;
+ tokens.extend(quote! {
+ #doc
+ pub mod #name {
+ #field_modules
+ }
+ });
+ }
+ Ok(())
+ })?;
+
+ block.subblocks().try_for_each(|block| -> Result<()> {
+ let block_mod = render_module(block, ir)?;
+
+ let span = Span::call_site();
+ let mod_name = Ident::new(&block.module, span);
+ tokens.extend(quote! {
+ pub mod #mod_name {
+ #block_mod
+ }
+ });
+ Ok(())
+ })?;
+
+ Ok(tokens)
+}
+
+pub fn render(ir: &ir::IR, b: &ir::Block, path: &str) -> Result {
+ let block = Block::new(path, b, ir, None);
+ render_module(&block, ir)
+}
diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs
new file mode 100644
index 000000000000..d8c5f1979b99
--- /dev/null
+++ b/raltool/src/generate/device.rs
@@ -0,0 +1,282 @@
+use std::collections::BTreeMap;
+
+use anyhow::Result;
+use proc_macro2::{Ident, Span, TokenStream};
+use quote::quote;
+
+use crate::ir::*;
+use crate::util::{self, ToSanitizedUpperCase};
+
+pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result {
+ let num_endings = regex::Regex::new(r"(\d+)$").unwrap();
+ let mut out = TokenStream::new();
+ let span = Span::call_site();
+
+ let mut interrupts_sorted = d.interrupts.clone();
+ interrupts_sorted.sort_by_key(|i| i.value);
+
+ let mut interrupts = TokenStream::new();
+ let mut peripherals = TokenStream::new();
+ let mut vectors = TokenStream::new();
+ let mut names = vec![];
+
+ let mut pos = 0;
+ for i in &interrupts_sorted {
+ while pos < i.value {
+ vectors.extend(quote!(Vector { _reserved: 0 },));
+ pos += 1;
+ }
+ pos += 1;
+
+ let name_uc = Ident::new(&i.name.to_sanitized_upper_case(), span);
+ let description = format!(
+ "{} - {}",
+ i.value,
+ i.description
+ .as_ref()
+ .map(|s| util::respace(s))
+ .as_ref()
+ .map(|s| util::escape_brackets(s))
+ .unwrap_or_else(|| i.name.clone())
+ );
+
+ let value = util::unsuffixed(i.value as u64);
+
+ interrupts.extend(quote! {
+ #[doc = #description]
+ #name_uc = #value,
+ });
+ vectors.extend(quote!(Vector { _handler: #name_uc },));
+ names.push(name_uc);
+ }
+
+ let mut block_to_peripherals = BTreeMap::new();
+ for peripheral in &d.peripherals {
+ let block_name = peripheral
+ .block
+ .as_ref()
+ .expect("All peripherals must have a block");
+ let (block_path, _) = super::split_path(block_name);
+ let mod_name = block_path
+ .last()
+ .expect("There's a final component")
+ .to_string();
+ block_to_peripherals
+ .entry(mod_name)
+ .or_insert_with(|| (block_path, Vec::new()))
+ .1
+ .push(peripheral)
+ }
+
+ for (mod_name, (block_path, periphs)) in &block_to_peripherals {
+ let mut consts = TokenStream::new();
+ for peripheral in periphs.iter() {
+ let name = Ident::new(&peripheral.name, span);
+ let address = util::hex(peripheral.base_address as u64);
+ let doc = util::doc(&peripheral.description);
+
+ consts.extend(quote! {
+ #doc
+ pub const #name: *const RegisterBlock = #address as *const RegisterBlock;
+ });
+ }
+
+ let import = {
+ let block_path = block_path.join("/");
+ const BLOCK_MOD: &str = super::BLOCK_MOD;
+ let module_path = format!("{BLOCK_MOD}/{block_path}.rs");
+ quote! {
+ #[path = #module_path]
+ mod blocks;
+ pub use blocks::*;
+ }
+ };
+
+ let number_fn: TokenStream;
+ let instances = if periphs.len() > 1
+ && periphs
+ .iter()
+ .all(|periph| num_endings.is_match(&periph.name))
+ {
+ let mut instances = TokenStream::new();
+ let mut const_to_num: Vec = Vec::new();
+ for peripheral in periphs.iter() {
+ let name = Ident::new(&peripheral.name, span);
+ let num = num_endings.captures(&peripheral.name).unwrap();
+ let num = util::unsuffixed(
+ num.get(1)
+ .and_then(|num| str::parse(num.as_str()).ok())
+ .unwrap(),
+ );
+ const_to_num.push(quote! { (#name, #num), });
+ instances.extend(quote! {
+ pub type #name = Instance<#num>;
+ impl crate::private::Sealed for #name {}
+ impl crate::Valid for #name {}
+
+ impl #name {
+ /// Acquire a vaild, but possibly aliased, instance.
+ ///
+ /// # Safety
+ ///
+ /// See [the struct-level safety documentation](crate::Instance).
+ #[inline]
+ pub const unsafe fn instance() -> Self {
+ Instance::new(#name)
+ }
+ }
+ });
+ }
+ number_fn = quote! {
+ /// Returns the instance number `N` for a peripheral instance.
+ pub fn number(rb: *const RegisterBlock) -> Option {
+ [#(#const_to_num)*].into_iter()
+ .find(|(ptr, _)| core::ptr::eq(rb, *ptr))
+ .map(|(_, inst)| inst)
+ }
+ };
+ instances
+ } else {
+ assert!(
+ periphs.len() == 1,
+ r#"{periphs:#?}
+Cannot generate this constified API when there's multiple, un-numbered peripherals.
+The implementation doesn't automagically handle this right now. Until this is implemented,
+you should use transforms to rename peripherals, putting numbers at the end of the peripheral
+name."#
+ );
+ let peripheral = periphs.first().unwrap();
+ let name = Ident::new(&peripheral.name, span);
+ number_fn = quote! {
+ /// Returns the instance number `N` for a peripheral instance.
+ pub fn number(rb: *const RegisterBlock) -> Option {
+ core::ptr::eq(rb, #name).then_some(0)
+ }
+ };
+ quote! {
+ pub type #name = Instance<{crate::SOLE_INSTANCE}>;
+ impl crate::private::Sealed for #name {}
+ impl crate::Valid for #name {}
+ impl #name {
+ /// Acquire a vaild, but possibly aliased, instance.
+ ///
+ /// # Safety
+ ///
+ /// See [the struct-level safety documentation](crate::Instance).
+ #[inline]
+ pub const unsafe fn instance() -> Self {
+ Instance::new(#name)
+ }
+ }
+ }
+ };
+
+ let mod_name = Ident::new(mod_name, span);
+ peripherals.extend(quote! {
+ #[path = "."]
+ pub mod #mod_name {
+ #consts
+ #import
+
+ pub type Instance = crate::Instance;
+ #instances
+ #number_fn
+ }
+ })
+ }
+
+ let n = util::unsuffixed(pos as u64);
+ out.extend(quote!(
+ #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+ pub enum Interrupt {
+ #interrupts
+ }
+ pub type interrupt = Interrupt;
+
+ unsafe impl cortex_m::interrupt::InterruptNumber for Interrupt {
+ #[inline(always)]
+ fn number(self) -> u16 {
+ self as u16
+ }
+ }
+
+ #[cfg(feature = "rt")]
+ mod _vectors {
+ extern "C" {
+ #(fn #names();)*
+ }
+
+ pub union Vector {
+ _handler: unsafe extern "C" fn(),
+ _reserved: u32,
+ }
+
+ #[cfg_attr(target_os = "none", link_section = ".vector_table.interrupts")]
+ #[no_mangle]
+ pub static __INTERRUPTS: [Vector; #n] = [
+ #vectors
+ ];
+ }
+
+ #peripherals
+ ));
+
+ let cpu = d.cpu.as_ref().expect("There must be a CPU.");
+ let bits = util::unsuffixed(u64::from(cpu.nvic_priority_bits));
+
+ out.extend(quote! {
+ ///Number available in the NVIC for configuring priority
+ pub const NVIC_PRIO_BITS: u8 = #bits;
+ });
+
+ //
+ // Emit RTIC peripheral struct.
+ //
+ let mut member_decls = TokenStream::new();
+ let mut member_inits = TokenStream::new();
+ for (mod_name, (_, peripherals)) in &block_to_peripherals {
+ for peripheral in peripherals {
+ let name = Ident::new(&peripheral.name, span);
+ let mod_name = Ident::new(mod_name, span);
+ member_decls.extend(quote! {
+ pub #name: #mod_name::#name,
+ });
+ member_inits.extend(quote! {
+ #name: #mod_name::#name::instance(),
+ });
+ }
+ }
+ out.extend(quote! {
+ /// Instances for all of this device's peripherals.
+ ///
+ /// This is exposed for the RTIC framework. RTIC knows how
+ /// to safely acquire all instances so that you don't have
+ /// to use `unsafe`. See the RTIC documentation for more
+ /// information.
+ pub struct Peripherals {
+ #member_decls
+ }
+ impl Peripherals {
+ /// "Steal" all instances.
+ ///
+ /// The name `steal()` is to meet RTIC requirements. Internally,
+ /// this constructor calls `instance()` on each member.
+ ///
+ /// You shouldn't call this; let RTIC call this function.
+ ///
+ /// # Safety
+ ///
+ /// Since this calls `instance()` to initialize each of its members,
+ /// the `instance()` safety contract applies. See [the `Instance` safety
+ /// documentation](crate::Instance) for more information.
+ #[doc(hidden)] // This is only for RTIC.
+ pub const unsafe fn steal() -> Self {
+ Self {
+ #member_inits
+ }
+ }
+ }
+ });
+
+ Ok(out)
+}
diff --git a/raltool/src/generate/fieldset.rs b/raltool/src/generate/fieldset.rs
new file mode 100644
index 000000000000..6d75788f5520
--- /dev/null
+++ b/raltool/src/generate/fieldset.rs
@@ -0,0 +1,69 @@
+use anyhow::Result;
+use proc_macro2::TokenStream;
+use proc_macro2::{Ident, Span};
+use quote::quote;
+
+use crate::ir::*;
+use crate::util;
+
+pub fn render(ir: &IR, fs: &FieldSet) -> Result {
+ let span = Span::call_site();
+ let mut items = TokenStream::new();
+
+ let ty = match fs.bit_size {
+ BitSize(1..=8) => quote!(u8),
+ BitSize(9..=16) => quote!(u16),
+ BitSize(17..=32) => quote!(u32),
+ BitSize(33..=64) => quote!(u64),
+ BitSize(invalid) => anyhow::bail!("Invalid bit_size {invalid}"),
+ };
+
+ for f in &fs.fields {
+ anyhow::ensure!(
+ f.array.is_none(),
+ "Field {} is an array, and that's not supported",
+ f.name
+ );
+
+ let name = Ident::new(&f.name, span);
+ let bit_offset = proc_macro2::Literal::u32_unsuffixed(f.bit_offset);
+ let mask = util::hex(1u64.wrapping_shl(f.bit_size.0).wrapping_sub(1));
+ let doc = util::doc(&f.description);
+
+ let enum_tokenize = |enm: &Option| -> TokenStream {
+ enm.as_ref()
+ .and_then(|path| ir.enums.get(path))
+ .map(|enm| {
+ let mut items = TokenStream::new();
+ for e in &enm.variants {
+ let name = Ident::new(&e.name, span);
+ let value = util::hex(e.value);
+ let doc = util::doc(&e.description);
+ items.extend(quote!(
+ #doc
+ pub const #name: #ty = #value;
+ ));
+ }
+ items
+ })
+ .unwrap_or_else(TokenStream::new)
+ };
+
+ let reads = enum_tokenize(&f.enum_read);
+ let writes = enum_tokenize(&f.enum_write);
+ let reads_writes = enum_tokenize(&f.enum_readwrite);
+
+ items.extend(quote! {
+ #doc
+ pub mod #name {
+ pub const offset: #ty = #bit_offset;
+ pub const mask: #ty = #mask << offset;
+ pub mod R { #reads }
+ pub mod W { #writes }
+ pub mod RW { #reads_writes }
+ }
+ });
+ }
+
+ Ok(quote! { #items })
+}
diff --git a/raltool/src/generate/mod.rs b/raltool/src/generate/mod.rs
new file mode 100644
index 000000000000..c029c8e49ff2
--- /dev/null
+++ b/raltool/src/generate/mod.rs
@@ -0,0 +1,306 @@
+mod block;
+mod device;
+mod fieldset;
+
+use anyhow::Result;
+use proc_macro2::{Ident, Span, TokenStream};
+use quote::quote;
+use std::collections::{BTreeMap, HashSet};
+use std::io::Write;
+use std::path::{Path, PathBuf};
+use std::{fs, io};
+
+use crate::ir::*;
+
+struct Module {
+ items: TokenStream,
+ children: BTreeMap,
+ public: bool,
+ fs_only: bool,
+ reexport: bool,
+ conditional_feature: Option,
+}
+
+impl Module {
+ fn new() -> Self {
+ Self {
+ // Default mod contents
+ items: quote!(),
+ children: BTreeMap::new(),
+ public: true,
+ fs_only: false,
+ reexport: false,
+ conditional_feature: None,
+ }
+ }
+
+ fn mark_private(&mut self) -> &mut Module {
+ self.public = false;
+ self
+ }
+
+ fn mark_fs_only(&mut self) -> &mut Module {
+ self.fs_only = true;
+ self
+ }
+
+ fn mark_reexport(&mut self) -> &mut Module {
+ self.reexport = true;
+ self
+ }
+
+ fn conditional_on(&mut self, feature: &str) -> &mut Module {
+ self.conditional_feature = Some(feature.into());
+ self
+ }
+
+ fn get_by_path(&mut self, path: &[&str]) -> &mut Module {
+ if path.is_empty() {
+ return self;
+ }
+
+ self.children
+ .entry(path[0].to_owned())
+ .or_insert_with(Module::new)
+ .get_by_path(&path[1..])
+ }
+
+ fn render(self, path: &Path) -> Result<()> {
+ let span = Span::call_site();
+
+ let mut res = TokenStream::new();
+ res.extend(self.items);
+
+ for (name, module) in self.children.into_iter() {
+ let name = Ident::new(&name, span);
+
+ let subpath = if let Some(parent) = path.parent() {
+ if path.file_name() == Some(std::ffi::OsStr::new("lib.rs")) {
+ parent.join(format! {"{name}.rs"})
+ } else {
+ parent
+ .join(path.file_stem().unwrap())
+ .join(format!("{name}.rs"))
+ }
+ } else {
+ PathBuf::from(format!("{name}.rs"))
+ };
+
+ if !module.fs_only {
+ let privacy = if module.public { quote!(pub) } else { quote!() };
+ let conditional = if let Some(feature) = &module.conditional_feature {
+ quote!(#[cfg(feature = #feature)])
+ } else {
+ quote!()
+ };
+ let reexport = if module.reexport {
+ quote!(pub use #name::*;)
+ } else {
+ quote!()
+ };
+ module.render(&subpath)?;
+ let file_path = format!("{name}.rs");
+ res.extend(quote! {
+ #conditional
+ #[path = #file_path]
+ #privacy mod #name;
+ #conditional
+ #reexport
+ });
+ } else {
+ module.render(&subpath)?;
+ }
+ }
+
+ if let Some(parent) = path.parent() {
+ fs::create_dir_all(parent)?;
+ }
+ if !self.fs_only {
+ fs::write(path, res.to_string().as_bytes())?;
+ }
+ Ok(())
+ }
+}
+
+pub enum CommonModule {
+ Builtin,
+ External(TokenStream),
+}
+
+pub struct Options {
+ pub module_root: PathBuf,
+ pub weak_syms: bool,
+}
+
+const BLOCK_MOD: &str = "blocks";
+
+pub fn render(ir: &IR, opts: &Options) -> Result<()> {
+ let mut root = Module::new();
+ root.items = TokenStream::new(); // Remove default contents
+ root.get_by_path(&[BLOCK_MOD]).mark_fs_only();
+
+ root.items.extend(quote!(
+ #![no_std]
+ #![allow(non_camel_case_types, non_snake_case, non_upper_case_globals, clippy::self_named_constructors, clippy::module_inception)]
+
+ pub use ral_registers::{RWRegister, RORegister, WORegister, read_reg, write_reg, modify_reg};
+
+ /// An owned peripheral of type `T`, instance `N`.
+ ///
+ /// Fabricating an `Instance` is always `unsafe`. An owner of an
+ /// `Instance` may assume that
+ ///
+ /// - the underlying pointer points to a static register block of type `T`.
+ /// - the instance number `N` properly describes the peripheral instance.
+ /// - they own _all_ registers pointed at by `T`.
+ ///
+ /// Owners use this guarantee to safely access the peripheral registers.
+ /// However, nothing guarantees any of these except for your diligence.
+ ///
+ /// Constructing an `Instance` is zero cost. Additionally, `Instance` is transparent
+ /// and amenable to null-pointer optimizations.
+ ///
+ /// See the package-level documentation for more information on fabricating
+ /// instances.
+ ///
+ /// # Safety of `new()`.
+ ///
+ /// By calling `new()`, you claim
+ ///
+ /// 1. `ptr` points to static memory that can be described by a type `T`.
+ /// 2. The instance number `N` correctly describes `ptr`.
+ /// 3. You are becoming the sole owner of this instance.
+ ///
+ /// # Safety of `instance()`
+ ///
+ /// The various `instance()` methods handle safety concerns 1 and 2 from `new()`.
+ /// By their construction, each `instance()` implementation provides a pointer to valid
+ /// peripheral memory, and associates the correct `N` with that pointer. Therefore,
+ /// you're only responsible for ensuring safety concern 3 from `new()`.
+ #[repr(transparent)]
+ pub struct Instance {
+ ptr: core::ptr::NonNull,
+ }
+
+ impl core::ops::Deref for Instance {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ // Safety: User provided a pointer that points to static MMIO.
+ // This implies non-null, initialized, aligned, and dereferenceable.
+ unsafe { self.ptr.as_ref() }
+ }
+ }
+
+ impl Instance {
+ /// Create an arbitrary `Instance` from a pointer to `T`.
+ ///
+ /// # Safety
+ ///
+ /// See [the struct docs](Instance) for the safety contract.
+ #[inline]
+ pub const unsafe fn new(ptr: *const T) -> Self {
+ // Casting *const _ to *mut _ is OK. The mutable pointer never
+ // escapes Instance.
+ Self { ptr: core::ptr::NonNull::new_unchecked(ptr as *mut _) }
+ }
+ }
+
+ unsafe impl Send for Instance {}
+
+ /// The instance number for a peripheral singleton.
+ ///
+ /// If your peripheral only has one instance, it's given
+ /// this number. The CCM peripheral is a good example of
+ /// a peripheral that uses this constant.
+ ///
+ /// See the package documentation for more information on
+ /// this constant.
+ pub const SOLE_INSTANCE: u8 = 0u8;
+ mod private {
+ pub trait Sealed {}
+ }
+
+ /// Vouches for an `Instance`'s validity.
+ ///
+ /// This trait is implemented for all `Instance` supported
+ /// by your chip. Note that the implementation may change when
+ /// selecting new chip features. For instance, i.MX RT 1011 chips
+ /// do not have LPUART 4 through 8. So, `Valid` is _not_ implemented
+ /// for `lpuart::Instance<4>` through `lpuart::Instance<8>`.
+ ///
+ /// See the package documentation for more information on how
+ /// to use this trait in your APIs.
+ pub trait Valid : private::Sealed {}
+ ));
+
+ let mut root_blocks = HashSet::new();
+ for (p, d) in ir.devices.iter() {
+ root_blocks.extend(
+ d.peripherals
+ .iter()
+ .filter_map(|peripheral| peripheral.block.as_ref()),
+ );
+ let mods = p.split("::").collect::>();
+ root.get_by_path(&mods)
+ .items
+ .extend(device::render(opts, ir, d)?);
+ }
+
+ for root_block in root_blocks {
+ let b = ir.blocks.get(root_block).unwrap();
+ let (mods, _) = split_path(root_block);
+ root.get_by_path(&[BLOCK_MOD])
+ .get_by_path(&mods)
+ .items
+ .extend(block::render(ir, b, root_block)?);
+ }
+
+ for (dev_mod_name, dev_mod) in root.children.iter_mut().filter(|(k, _)| *k != BLOCK_MOD) {
+ dev_mod
+ .mark_private()
+ .conditional_on(dev_mod_name)
+ .mark_reexport();
+ }
+ for block_dev_mod in root.get_by_path(&[BLOCK_MOD]).children.values_mut() {
+ block_dev_mod.mark_fs_only();
+ }
+
+ root.render(&opts.module_root)?;
+ weak_syms(opts, ir)?;
+ Ok(())
+}
+
/// Split a `::`-separated path into its leading modules and final segment.
fn split_path(s: &str) -> (Vec<&str>, &str) {
    let mut segments: Vec<&str> = s.split("::").collect();
    let last = segments.pop().unwrap();
    (segments, last)
}
+
+/// Generate a linker script of weak symbols for interrupt handlers.
+fn weak_syms(opts: &Options, ir: &IR) -> Result<()> {
+ if !opts.weak_syms {
+ return Ok(());
+ }
+
+ for (name, device) in &ir.devices {
+ if name.is_empty() {
+ continue;
+ }
+
+ let mut interrupts = device.interrupts.clone();
+ interrupts.sort_by_key(|intr| intr.value);
+
+ let mut path = opts.module_root.parent().unwrap().join(name);
+ path.set_extension("x");
+
+ let file = fs::File::create(path)?;
+ let mut file = io::BufWriter::new(file);
+
+ for intr in interrupts {
+ writeln!(file, "PROVIDE({} = DefaultHandler);", intr.name)?;
+ }
+ }
+
+ Ok(())
+}
diff --git a/raltool/src/ir.rs b/raltool/src/ir.rs
new file mode 100644
index 000000000000..4f86dbce179b
--- /dev/null
+++ b/raltool/src/ir.rs
@@ -0,0 +1,318 @@
+use de::MapAccess;
+use serde::{de, de::Visitor, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
+use std::collections::{BTreeMap, HashMap};
+use std::fmt;
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct IR {
+ pub devices: HashMap,
+ pub blocks: HashMap,
+ pub fieldsets: HashMap,
+ pub enums: HashMap,
+}
+
+impl IR {
+ pub fn new() -> Self {
+ Self {
+ devices: HashMap::new(),
+ blocks: HashMap::new(),
+ fieldsets: HashMap::new(),
+ enums: HashMap::new(),
+ }
+ }
+
+ pub fn merge(&mut self, other: IR) {
+ self.devices.extend(other.devices);
+ self.blocks.extend(other.blocks);
+ self.fieldsets.extend(other.fieldsets);
+ self.enums.extend(other.enums);
+ }
+}
+
+impl Default for IR {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Cpu {
+ pub nvic_priority_bits: u32,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Device {
+ pub peripherals: Vec,
+ pub interrupts: Vec,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub cpu: Option,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Peripheral {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub base_address: u64,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub array: Option,
+
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub block: Option,
+
+ #[serde(
+ default,
+ skip_serializing_if = "HashMap::is_empty",
+ serialize_with = "ordered_map"
+ )]
+ pub interrupts: HashMap,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Interrupt {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub value: u32,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Block {
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub extends: Option,
+
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub items: Vec,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct BlockItem {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub array: Option,
+ pub byte_offset: u32,
+
+ #[serde(flatten)]
+ pub inner: BlockItemInner,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum BlockItemInner {
+ Block(BlockItemBlock),
+ Register(Register),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Array {
+ Regular(RegularArray),
+ Cursed(CursedArray),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct RegularArray {
+ pub len: u32,
+ pub stride: u32,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct CursedArray {
+ pub offsets: Vec,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct BitSize(pub u32);
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Register {
+ #[serde(default = "default_readwrite", skip_serializing_if = "is_readwrite")]
+ pub access: Access,
+ pub bit_size: BitSize,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub fieldset: Option,
+}
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct BlockItemBlock {
+ pub block: String,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)]
+pub enum Access {
+ ReadWrite,
+ Read,
+ Write,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct FieldSet {
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub extends: Option,
+
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub bit_size: BitSize,
+ pub fields: Vec,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Field {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+
+ pub bit_offset: u32,
+ pub bit_size: BitSize,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub array: Option,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub enum_read: Option,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub enum_write: Option,
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "enum")]
+ pub enum_readwrite: Option,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Enum {
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub bit_size: BitSize,
+ pub variants: Vec,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct EnumVariant {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub description: Option,
+ pub value: u64,
+}
+
+fn default_readwrite() -> Access {
+ Access::ReadWrite
+}
+fn is_readwrite(x: &Access) -> bool {
+ *x == Access::ReadWrite
+}
+
/// Top-level IR item kinds. The derived `Ord` drives the deterministic
/// serialization order: blocks, then fieldsets, then enums.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Kind {
    Block,
    Fieldset,
    Enum,
}
+
+impl Serialize for IR {
+ fn serialize
(&self, serializer: S) -> Result
+ where
+ S: Serializer,
+ {
+ // Sort by block/fieldset/enum, then alphabetically.
+ // This ensures the output's order is deterministic.
+ // - Easier diffing between yamls
+ // - No spurious changes when roundtripping
+ let mut entries = Vec::new();
+ for name in self.blocks.keys() {
+ entries.push((Kind::Block, name));
+ }
+ for name in self.fieldsets.keys() {
+ entries.push((Kind::Fieldset, name));
+ }
+ for name in self.enums.keys() {
+ entries.push((Kind::Enum, name));
+ }
+
+ entries.sort();
+
+ let mut map = serializer.serialize_map(Some(entries.len()))?;
+ for (kind, name) in entries {
+ match kind {
+ Kind::Block => {
+ map.serialize_entry(
+ &format!("block/{}", name),
+ self.blocks.get(name).unwrap(),
+ )?;
+ }
+ Kind::Fieldset => {
+ map.serialize_entry(
+ &format!("fieldset/{}", name),
+ self.fieldsets.get(name).unwrap(),
+ )?;
+ }
+ Kind::Enum => {
+ map.serialize_entry(&format!("enum/{}", name), self.enums.get(name).unwrap())?;
+ }
+ }
+ }
+ map.end()
+ }
+}
+
+struct IRVisitor;
+
+impl<'de> Visitor<'de> for IRVisitor {
+ type Value = IR;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ formatter.write_str("an IR")
+ }
+
+ fn visit_map(self, mut access: M) -> Result
+ where
+ M: MapAccess<'de>,
+ {
+ let mut ir = IR::new();
+
+ // While there are entries remaining in the input, add them
+ // into our map.
+ while let Some(key) = access.next_key()? {
+ let key: String = key;
+ let (kind, name) = key.split_once('/').ok_or_else(|| de::Error::custom("item names must be in form `kind/name`, where kind is `block`, `fieldset` or `enum`"))?;
+ match kind {
+ "block" => {
+ let val: Block = access.next_value()?;
+ if ir.blocks.insert(name.to_string(), val).is_some() {
+ return Err(de::Error::custom(format!("Duplicate item {:?}", key)));
+ }
+ }
+ "fieldset" => {
+ let val: FieldSet = access.next_value()?;
+ if ir.fieldsets.insert(name.to_string(), val).is_some() {
+ return Err(de::Error::custom(format!("Duplicate item {:?}", key)));
+ }
+ }
+ "enum" => {
+ let val: Enum = access.next_value()?;
+ if ir.enums.insert(name.to_string(), val).is_some() {
+ return Err(de::Error::custom(format!("Duplicate item {:?}", key)));
+ }
+ }
+ _ => return Err(de::Error::custom(format!("Unknown kind {:?}", kind))),
+ }
+ }
+
+ Ok(ir)
+ }
+}
+
+impl<'de> Deserialize<'de> for IR {
+ fn deserialize(deserializer: D) -> Result
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_map(IRVisitor)
+ }
+}
+
+fn ordered_map(value: &HashMap, serializer: S) -> Result
+where
+ S: Serializer,
+{
+ let ordered: BTreeMap<_, _> = value.iter().collect();
+ ordered.serialize(serializer)
+}
diff --git a/raltool/src/lib.rs b/raltool/src/lib.rs
new file mode 100755
index 000000000000..40bf901074a4
--- /dev/null
+++ b/raltool/src/lib.rs
@@ -0,0 +1,6 @@
+pub mod combine;
+pub mod generate;
+pub mod ir;
+pub mod svd2ir;
+pub mod transform;
+pub mod util;
diff --git a/raltool/src/main.rs b/raltool/src/main.rs
new file mode 100755
index 000000000000..d2a5839b9e0b
--- /dev/null
+++ b/raltool/src/main.rs
@@ -0,0 +1,389 @@
+#![recursion_limit = "128"]
+
+use anyhow::{bail, Context, Result};
+use clap::Parser;
+use log::*;
+use raltool::{combine, generate, svd2ir};
+use rayon::prelude::*;
+use regex::Regex;
+use std::fs;
+use std::io::Read;
+use std::path::PathBuf;
+use std::{fs::File, io::stdout};
+
+use raltool::ir::IR;
+
+#[derive(Parser)]
+struct Opts {
+ #[clap(subcommand)]
+ subcommand: Subcommand,
+}
+
+#[derive(Parser)]
+enum Subcommand {
+ Generate(Generate),
+ ExtractPeripheral(ExtractPeripheral),
+ Transform(Transform),
+ Fmt(Fmt),
+ Check(Check),
+ GenBlock(GenBlock),
+}
+
+/// Extract peripheral from SVD to YAML
+#[derive(Parser)]
+struct ExtractPeripheral {
+ /// SVD file path
+ #[clap(long)]
+ svd: String,
+ /// Peripheral from the SVD
+ #[clap(long)]
+ peripheral: String,
+ /// Transforms file path
+ #[clap(long)]
+ transform: Option,
+}
+
+/// Apply transform to YAML
+#[derive(Parser)]
+struct Transform {
+ /// Input YAML path
+ #[clap(short, long)]
+ input: String,
+ /// Output YAML path
+ #[clap(short, long)]
+ output: String,
+ /// Transforms file path
+ #[clap(short, long)]
+ transform: String,
+}
+
+/// Generate a PAC directly from a SVD
+#[derive(Parser)]
+struct Generate {
+ /// SVD file path(s)
+ svds: Vec,
+ /// Transforms file path
+ #[clap(long)]
+ transform: Option,
+ /// Directory for the output.
+ #[clap(long, default_value_t = String::from("src"))]
+ output_directory: String,
+}
+
+/// Reformat a YAML
+#[derive(Parser)]
+struct Fmt {
+ /// Peripheral file path
+ files: Vec,
+ /// Error if incorrectly formatted, instead of fixing.
+ #[clap(long)]
+ check: bool,
+}
+
+/// Check a YAML for errors.
+#[derive(Parser)]
+struct Check {
+ /// Peripheral file path
+ files: Vec,
+}
+
+/// Generate Rust code from a YAML register block
+#[derive(Parser)]
+struct GenBlock {
+ /// Input YAML path
+ #[clap(short, long)]
+ input: String,
+ /// Output YAML path
+ #[clap(short, long)]
+ output: String,
+}
+
+fn main() -> Result<()> {
+ env_logger::init();
+
+ let opts: Opts = Opts::parse();
+
+ match opts.subcommand {
+ Subcommand::ExtractPeripheral(x) => extract_peripheral(x),
+ Subcommand::Generate(x) => gen(x),
+ Subcommand::Transform(x) => transform(x),
+ Subcommand::Fmt(x) => fmt(x),
+ Subcommand::Check(x) => check(x),
+ Subcommand::GenBlock(x) => gen_block(x),
+ }
+}
+
+fn load_svd(path: &str) -> Result {
+ let xml = &mut String::new();
+ File::open(path)
+ .context("Cannot open the SVD file")?
+ .read_to_string(xml)
+ .context("Cannot read the SVD file")?;
+
+ let device = svd_parser::parse(xml)?;
+ Ok(device)
+}
+
+fn load_config(path: &str) -> Result {
+ let config = fs::read(path).context("Cannot read the config file")?;
+ serde_yaml::from_slice(&config).context("cannot deserialize config")
+}
+
+fn extract_peripheral(args: ExtractPeripheral) -> Result<()> {
+ let config = match args.transform {
+ Some(s) => load_config(&s)?,
+ None => Config::default(),
+ };
+
+ let svd = load_svd(&args.svd)?;
+ let mut ir = IR::new();
+
+ let peri = args.peripheral;
+ let mut p = svd
+ .peripherals
+ .iter()
+ .find(|p| p.name == peri)
+ .expect("peripheral not found");
+
+ if let Some(f) = &p.derived_from {
+ p = svd
+ .peripherals
+ .iter()
+ .find(|p| p.name == *f)
+ .expect("derivedFrom peripheral not found");
+ }
+
+ raltool::svd2ir::convert_peripheral(&mut ir, p)?;
+
+ // Fix weird newline spam in descriptions.
+ let re = Regex::new("[ \n]+").unwrap();
+ raltool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?;
+
+ for t in &config.transforms {
+ info!("running: {:?}", t);
+ t.run(&mut ir)?;
+ }
+
+ // Ensure consistent sort order in the YAML.
+ raltool::transform::sort::Sort {}.run(&mut ir).unwrap();
+
+ serde_yaml::to_writer(stdout(), &ir).unwrap();
+ Ok(())
+}
+
+fn gen(mut args: Generate) -> Result<()> {
+ let config = match args.transform {
+ Some(s) => load_config(&s)?,
+ None => Config::default(),
+ };
+
+ // Fix weird newline spam in descriptions.
+ let re = Regex::new("[ \n]+").unwrap();
+
+ args.svds.sort_unstable();
+ let irs: Vec = args
+ .svds
+ .par_iter()
+ .map(|svd| -> Result {
+ let svd = load_svd(svd)?;
+ let mut ir = svd2ir::convert_svd(&svd)?;
+
+ raltool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?;
+
+ for t in &config.transforms {
+ t.run(&mut ir)?;
+ }
+ Ok(ir)
+ })
+ .collect::>()?;
+
+ let generate_opts = generate::Options {
+ module_root: PathBuf::from(args.output_directory).join("lib.rs"),
+ weak_syms: true,
+ };
+
+ let combination = combine::combine(&irs);
+ generate::render(&combination, &generate_opts)?;
+
+ Ok(())
+}
+
+fn transform(args: Transform) -> Result<()> {
+ let data = fs::read(&args.input)?;
+ let mut ir: IR = serde_yaml::from_slice(&data)?;
+ let config = load_config(&args.transform)?;
+ for t in &config.transforms {
+ info!("running: {:?}", t);
+ t.run(&mut ir)?;
+ }
+ let data = serde_yaml::to_vec(&ir)?;
+ fs::write(&args.output, data)?;
+
+ Ok(())
+}
+
+fn fmt(args: Fmt) -> Result<()> {
+ for file in args.files {
+ let got_data = fs::read(&file)?;
+ let mut ir: IR = serde_yaml::from_slice(&got_data)?;
+
+ // Ensure consistent sort order in the YAML.
+ raltool::transform::sort::Sort {}.run(&mut ir).unwrap();
+
+ // Trim all descriptions
+
+ let cleanup = |s: &mut Option| {
+ if let Some(s) = s.as_mut() {
+ *s = s.trim().to_string()
+ }
+ };
+
+ for b in ir.blocks.values_mut() {
+ cleanup(&mut b.description);
+ for i in &mut b.items {
+ cleanup(&mut i.description);
+ }
+ }
+
+ for b in ir.fieldsets.values_mut() {
+ cleanup(&mut b.description);
+ for i in &mut b.fields {
+ cleanup(&mut i.description);
+ }
+ }
+
+ for b in ir.enums.values_mut() {
+ cleanup(&mut b.description);
+ for i in &mut b.variants {
+ cleanup(&mut i.description);
+ }
+ }
+
+ let want_data = serde_yaml::to_vec(&ir)?;
+
+ if got_data != want_data {
+ if args.check {
+ bail!("File {} is not correctly formatted", &file);
+ } else {
+ fs::write(&file, want_data)?;
+ }
+ }
+ }
+ Ok(())
+}
+
+fn check(args: Check) -> Result<()> {
+ for file in args.files {
+ let got_data = fs::read(&file)?;
+ let ir: IR = serde_yaml::from_slice(&got_data)?;
+
+ let mut printed = false;
+ let mut error = move |s: String| {
+ if !printed {
+ printed = true;
+ println!("{}:", &file);
+ }
+ println!(" {}", s);
+ };
+
+ for (name, b) in &ir.blocks {
+ for (i1, i2) in Pairs::new(b.items.iter()) {
+ if i1.byte_offset == i2.byte_offset {
+ error(format!(
+ "block {}: registers overlap: {} {}",
+ name, i1.name, i2.name
+ ));
+ }
+ }
+ }
+
+ for (name, e) in &ir.enums {
+ for (i1, i2) in Pairs::new(e.variants.iter()) {
+ if i1.value == i2.value {
+ error(format!(
+ "enum {}: variants with same value: {} {}",
+ name, i1.name, i2.name
+ ));
+ }
+ }
+ }
+
+ for (name, f) in &ir.fieldsets {
+ for (i1, i2) in Pairs::new(f.fields.iter()) {
+ if i2.bit_offset + i2.bit_size.0 > i1.bit_offset
+ && i1.bit_offset + i1.bit_size.0 > i2.bit_offset
+ {
+ error(format!(
+ "fieldset {}: fields overlap: {} {}",
+ name, i1.name, i2.name
+ ));
+ }
+ }
+ }
+ }
+ Ok(())
+}
+
+fn gen_block(args: GenBlock) -> Result<()> {
+ let data = fs::read(&args.input)?;
+ let mut ir: IR = serde_yaml::from_slice(&data)?;
+
+ raltool::transform::Sanitize {}.run(&mut ir).unwrap();
+
+ // Ensure consistent sort order in the YAML.
+ raltool::transform::sort::Sort {}.run(&mut ir).unwrap();
+
+ let generate_opts = generate::Options {
+ module_root: std::path::PathBuf::from(&args.output),
+ weak_syms: false,
+ };
+ generate::render(&ir, &generate_opts)?;
+ Ok(())
+}
+#[derive(serde::Serialize, serde::Deserialize, Default)]
+struct Config {
+ transforms: Vec,
+}
+
+// ==============
+
/// Iterator over all unordered pairs of distinct elements of an
/// underlying cloneable iterator, in order:
/// `(x0,x1), (x0,x2), …, (x1,x2), …`
struct Pairs<U: Iterator> {
    /// First element of the current pair; `None` once exhausted.
    head: Option<U::Item>,
    /// Remaining partners for the current head.
    tail: U,
    /// Iterator positioned at the next head candidate.
    next: U,
}

impl<U: Iterator + Clone> Pairs<U> {
    fn new(mut iter: U) -> Self {
        let head = iter.next();
        Pairs {
            head,
            tail: iter.clone(),
            next: iter,
        }
    }
}

impl<U: Iterator + Clone> Iterator for Pairs<U>
where
    U::Item: Clone,
{
    type Item = (U::Item, U::Item);

    fn next(&mut self) -> Option<Self::Item> {
        let a = self.head.as_ref()?.clone();

        if let Some(b) = self.tail.next() {
            return Some((a, b));
        }

        // Current head has no partners left: advance to the next head
        // and restart its partner iterator just past it.
        match self.next.next() {
            Some(new_head) => {
                self.head = Some(new_head);
                self.tail = self.next.clone();
                self.next()
            }
            None => None,
        }
    }
}
diff --git a/raltool/src/svd2ir.rs b/raltool/src/svd2ir.rs
new file mode 100644
index 000000000000..993df3df8607
--- /dev/null
+++ b/raltool/src/svd2ir.rs
@@ -0,0 +1,396 @@
+use log::*;
+use std::collections::HashMap;
+use svd_parser as svd;
+
+use crate::util;
+use crate::{ir::*, transform};
+
+struct ProtoBlock {
+ name: Vec,
+ description: Option,
+ registers: Vec,
+}
+
+struct ProtoFieldset {
+ name: Vec,
+ description: Option,
+ bit_size: BitSize,
+ fields: Vec,
+}
+
+struct ProtoEnum {
+ name: Vec,
+ usage: Option,
+ bit_size: BitSize,
+ variants: Vec,
+}
+
+pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<()> {
+ let mut blocks = Vec::new();
+ collect_blocks(
+ &mut blocks,
+ vec![p.name.clone()],
+ p.description.clone(),
+ p.registers.as_ref().unwrap(),
+ );
+
+ let mut fieldsets: Vec = Vec::new();
+ let mut enums: Vec = Vec::new();
+
+ for block in &blocks {
+ for r in &block.registers {
+ if let svd::RegisterCluster::Register(r) = r {
+ if r.derived_from.is_some() {
+ continue;
+ }
+
+ let fieldset_bitsize = match r {
+ svd::Register::Single(info) => info.size,
+ svd::Register::Array(info, _) => info.size,
+ }
+ .map(BitSize)
+ .expect("Unsized register is not supported");
+
+ if let Some(fields) = &r.fields {
+ let mut fieldset_name = block.name.clone();
+ fieldset_name.push(util::replace_suffix(&r.name, ""));
+ fieldsets.push(ProtoFieldset {
+ name: fieldset_name.clone(),
+ description: r.description.clone(),
+ bit_size: fieldset_bitsize,
+ fields: fields.clone(),
+ });
+
+ for f in fields {
+ if f.derived_from.is_some() {
+ continue;
+ }
+
+ let field_name = f.name.clone();
+
+ for e in &f.enumerated_values {
+ if e.derived_from.is_some() {
+ continue;
+ }
+
+ let mut enum_name = fieldset_name.clone();
+ enum_name.push(e.name.clone().unwrap_or_else(|| field_name.clone()));
+ info!("adding enum {:?}", enum_name);
+
+ enums.push(ProtoEnum {
+ name: enum_name,
+ usage: e.usage,
+ bit_size: fieldset_bitsize,
+ variants: e.values.clone(),
+ });
+ }
+ }
+ };
+ }
+ }
+ }
+
+ // Make all collected names unique by prefixing with parents' names if needed.
+ let block_names = unique_names(blocks.iter().map(|x| x.name.clone()).collect());
+ let fieldset_names = unique_names(fieldsets.iter().map(|x| x.name.clone()).collect());
+ let enum_names = unique_names(enums.iter().map(|x| x.name.clone()).collect());
+
+ // Convert blocks
+ for proto in &blocks {
+ let mut block = Block {
+ extends: None,
+ description: proto.description.clone(),
+ items: Vec::new(),
+ };
+
+ for r in &proto.registers {
+ match r {
+ svd::RegisterCluster::Register(r) => {
+ if r.derived_from.is_some() {
+ warn!("unsupported derived_from in registers");
+ continue;
+ }
+
+ let fieldset_name = if r.fields.is_some() {
+ let mut fieldset_name = proto.name.clone();
+ fieldset_name.push(util::replace_suffix(&r.name, ""));
+ Some(fieldset_names.get(&fieldset_name).unwrap().clone())
+ } else {
+ None
+ };
+
+ let array = if let svd::Register::Array(_, dim) = r {
+ Some(Array::Regular(RegularArray {
+ len: dim.dim,
+ stride: dim.dim_increment,
+ }))
+ } else {
+ None
+ };
+
+ let access = match r.access {
+ None => Access::ReadWrite,
+ Some(svd::Access::ReadOnly) => Access::Read,
+ Some(svd::Access::WriteOnly) => Access::Write,
+ Some(svd::Access::WriteOnce) => Access::Write,
+ Some(svd::Access::ReadWrite) => Access::ReadWrite,
+ Some(svd::Access::ReadWriteOnce) => Access::ReadWrite,
+ };
+
+ let block_item = BlockItem {
+ name: util::replace_suffix(&r.name, ""),
+ description: r.description.clone(),
+ array,
+ byte_offset: r.address_offset,
+ inner: BlockItemInner::Register(Register {
+ access, // todo
+ bit_size: BitSize(r.size.expect("Must have a bitsize")),
+ fieldset: fieldset_name.clone(),
+ }),
+ };
+
+ block.items.push(block_item)
+ }
+ svd::RegisterCluster::Cluster(c) => {
+ if c.derived_from.is_some() {
+ warn!("unsupported derived_from in clusters");
+ continue;
+ }
+
+ let cname = util::replace_suffix(&c.name, "");
+
+ let array = if let svd::Cluster::Array(_, dim) = c {
+ Some(Array::Regular(RegularArray {
+ len: dim.dim,
+ stride: dim.dim_increment,
+ }))
+ } else {
+ None
+ };
+
+ let mut block_name = proto.name.clone();
+ block_name.push(util::replace_suffix(&c.name, ""));
+ let block_name = block_names.get(&block_name).unwrap().clone();
+
+ block.items.push(BlockItem {
+ name: cname.clone(),
+ description: c.description.clone(),
+ array,
+ byte_offset: c.address_offset,
+ inner: BlockItemInner::Block(BlockItemBlock { block: block_name }),
+ });
+ }
+ }
+ }
+
+ let block_name = block_names.get(&proto.name).unwrap().clone();
+ assert!(ir.blocks.insert(block_name, block).is_none())
+ }
+
+ // Convert fieldsets
+ for proto in &fieldsets {
+ let mut fieldset = FieldSet {
+ extends: None,
+ description: proto.description.clone(),
+ bit_size: proto.bit_size,
+ fields: Vec::new(),
+ };
+
+ for f in &proto.fields {
+ if f.derived_from.is_some() {
+ warn!("unsupported derived_from in fieldset");
+ }
+
+ let mut field = Field {
+ name: f.name.clone(),
+ description: f.description.clone(),
+ bit_offset: f.bit_range.offset,
+ bit_size: BitSize(f.bit_range.width),
+ array: None,
+ enum_read: None,
+ enum_write: None,
+ enum_readwrite: None,
+ };
+
+ for e in &f.enumerated_values {
+ let mut enum_name = proto.name.clone();
+ enum_name.push(
+ e.derived_from
+ .clone()
+ .or_else(|| e.name.clone())
+ .unwrap_or_else(|| f.name.clone()),
+ );
+ info!("finding enum {:?}", enum_name);
+ let enumm = enums.iter().find(|e| e.name == enum_name).unwrap();
+ let enum_name = enum_names.get(&enum_name).unwrap().clone();
+ info!("found {:?}", enum_name);
+
+ let usage = enumm.usage.unwrap_or(svd::Usage::ReadWrite);
+
+ match usage {
+ svd::Usage::Read => field.enum_read = Some(enum_name.clone()),
+ svd::Usage::Write => field.enum_write = Some(enum_name.clone()),
+ svd::Usage::ReadWrite => field.enum_readwrite = Some(enum_name.clone()),
+ }
+ }
+
+ fieldset.fields.push(field)
+ }
+
+ let fieldset_name = fieldset_names.get(&proto.name).unwrap().clone();
+ assert!(ir.fieldsets.insert(fieldset_name, fieldset).is_none())
+ }
+
+ for proto in &enums {
+ let variants = proto
+ .variants
+ .iter()
+ .map(|v| EnumVariant {
+ description: v.description.clone(),
+ name: v.name.clone(),
+ value: v.value.unwrap() as _, // TODO what are variants without values used for??
+ })
+ .collect();
+
+ let enumm = Enum {
+ description: None,
+ bit_size: proto.bit_size,
+ variants,
+ };
+
+ let enum_name = enum_names.get(&proto.name).unwrap().clone();
+ assert!(ir.enums.insert(enum_name.clone(), enumm).is_none());
+ }
+
+ Ok(())
+}
+
+pub fn convert_svd(svd: &svd::Device) -> anyhow::Result {
+ let mut ir = IR::new();
+ let cpu = svd.cpu.as_ref().map(|cpu| Cpu {
+ nvic_priority_bits: cpu.nvic_priority_bits,
+ });
+ let mut device = Device {
+ peripherals: vec![],
+ interrupts: vec![],
+ cpu,
+ };
+
+ for p in &svd.peripherals {
+ let block_name = p.derived_from.as_ref().unwrap_or(&p.name);
+ let block_name = format!("{}::{}", block_name, block_name);
+ let periname = p.name.to_ascii_uppercase();
+
+ let peri = Peripheral {
+ name: periname.clone(),
+ description: p.description.clone(),
+ base_address: p.base_address,
+ block: Some(block_name),
+ array: None,
+ interrupts: HashMap::new(),
+ };
+
+ let mut irqs: Vec<&svd::Interrupt> = vec![];
+ for i in &p.interrupt {
+ if !irqs.iter().any(|&j| j.name == i.name) {
+ irqs.push(i)
+ }
+ }
+ irqs.sort_by_key(|i| &i.name);
+
+ for (_n, &i) in irqs.iter().enumerate() {
+ let iname = i.name.to_ascii_uppercase();
+
+ if !device.interrupts.iter().any(|j| j.name == iname) {
+ device.interrupts.push(Interrupt {
+ name: iname.clone(),
+ description: i.description.clone(),
+ value: i.value,
+ });
+ }
+
+ /*
+ let name = if iname.len() > periname.len() && iname.starts_with(&periname) {
+ let s = iname.strip_prefix(&periname).unwrap();
+ s.trim_matches('_').to_string()
+ } else if irqs.len() == 1 {
+ "IRQ".to_string()
+ } else {
+ format!("IRQ{}", n)
+ };
+
+ peri.interrupts.insert(name, iname.clone());
+ */
+ }
+
+ device.peripherals.push(peri);
+
+ if p.derived_from.is_none() {
+ let mut pir = IR::new();
+ convert_peripheral(&mut pir, p)?;
+
+ let path = &p.name;
+ transform::map_names(&mut pir, |k, s| match k {
+ transform::NameKind::Block => *s = format!("{}::{}", path, s),
+ transform::NameKind::Fieldset => *s = format!("{}::regs::{}", path, s),
+ transform::NameKind::Enum => *s = format!("{}::vals::{}", path, s),
+ _ => {}
+ });
+
+ ir.merge(pir);
+ }
+ }
+
+ ir.devices.insert(svd.name.clone(), device);
+
+ transform::sort::Sort {}.run(&mut ir).unwrap();
+ transform::Sanitize {}.run(&mut ir).unwrap();
+ transform::SimplifyPaths::new().run(&mut ir).unwrap();
+
+ Ok(ir)
+}
+
+fn collect_blocks(
+ out: &mut Vec,
+ block_name: Vec,
+ description: Option,
+ registers: &[svd::RegisterCluster],
+) {
+ out.push(ProtoBlock {
+ name: block_name.clone(),
+ description,
+ registers: registers.to_owned(),
+ });
+
+ for r in registers {
+ if let svd::RegisterCluster::Cluster(c) = r {
+ if c.derived_from.is_some() {
+ continue;
+ }
+
+ let mut block_name = block_name.clone();
+ block_name.push(util::replace_suffix(&c.name, ""));
+ collect_blocks(out, block_name, c.description.clone(), &c.children);
+ }
+ }
+}
+
/// Maps each path-like name to its shortest suffix that is unique among all
/// `names`, joined with '_'. E.g. `[["a","x"], ["b","x"]]` yields
/// `{"a_x", "b_x"}` because the suffix `["x"]` alone is ambiguous.
fn unique_names(names: Vec<Vec<String>>) -> HashMap<Vec<String>, String> {
    let mut res = HashMap::new();

    // True if the suffix `n` also terminates any *other* name.
    let suffix_exists = |n: &[String], i: usize| {
        names
            .iter()
            .enumerate()
            .filter(|(j, _)| *j != i)
            .any(|(_, n2)| n2.ends_with(n))
    };
    for (i, n) in names.iter().enumerate() {
        // Grow the suffix from the right until it is unambiguous.
        let j = (0..n.len())
            .rev()
            .find(|&j| !suffix_exists(&n[j..], i))
            .unwrap();
        assert!(res.insert(n.clone(), n[j..].join("_")).is_none());
    }
    res
}
diff --git a/raltool/src/transform/common.rs b/raltool/src/transform/common.rs
new file mode 100644
index 000000000000..15e82bf2b33b
--- /dev/null
+++ b/raltool/src/transform/common.rs
@@ -0,0 +1,279 @@
+use anyhow::bail;
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+
+use crate::ir::*;
+
+pub(crate) fn make_regex(r: &str) -> Result {
+ regex::Regex::new(&format!("^{}$", r))
+}
+
/// How strictly two IR items must match before they may be merged.
/// Ordered weakest-to-strongest; comparisons (`>=`) rely on this ordering.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Serialize, Deserialize)]
pub enum CheckLevel {
    // Merge unconditionally.
    NoCheck,
    // Require identical layout (sizes, offsets, values, arrays).
    Layout,
    // Layout plus identical names.
    Names,
    // Names plus identical descriptions.
    Descriptions,
}
+
+pub(crate) fn check_mergeable_enums(a: &Enum, b: &Enum, level: CheckLevel) -> anyhow::Result<()> {
+ if let Err(e) = check_mergeable_enums_inner(a, b, level) {
+ bail!(
+ "Cannot merge enums.\nfirst: {:#?}\nsecond: {:#?}\ncause: {:?}",
+ a,
+ b,
+ e
+ )
+ }
+ Ok(())
+}
+pub(crate) fn check_mergeable_enums_inner(
+ a: &Enum,
+ b: &Enum,
+ level: CheckLevel,
+) -> anyhow::Result<()> {
+ if a.bit_size != b.bit_size {
+ bail!("Different bit size: {} vs {}", a.bit_size.0, b.bit_size.0)
+ }
+
+ if level >= CheckLevel::Layout {
+ if a.variants.len() != b.variants.len() {
+ bail!("Different variant count")
+ }
+
+ let mut aok = [false; 128];
+ let mut bok = [false; 128];
+
+ for (ia, fa) in a.variants.iter().enumerate() {
+ if let Some((ib, _fb)) = b
+ .variants
+ .iter()
+ .enumerate()
+ .find(|(ib, fb)| !bok[*ib] && mergeable_variants(fa, fb, level))
+ {
+ aok[ia] = true;
+ bok[ib] = true;
+ } else {
+ bail!("Variant in first enum has no match: {:?}", fa);
+ }
+ }
+ }
+
+ Ok(())
+}
+
+pub(crate) fn mergeable_variants(a: &EnumVariant, b: &EnumVariant, level: CheckLevel) -> bool {
+ let mut res = true;
+ if level >= CheckLevel::Layout {
+ res &= a.value == b.value;
+ }
+ if level >= CheckLevel::Names {
+ res &= a.name == b.name;
+ }
+ if level >= CheckLevel::Descriptions {
+ res &= a.description == b.description;
+ }
+ res
+}
+
impl Default for CheckLevel {
    // `Names` is the middle ground: layout and names must match, but
    // descriptions (which routinely differ between SVDs) may diverge.
    fn default() -> Self {
        Self::Names
    }
}
+
+pub(crate) fn check_mergeable_fieldsets(
+ a: &FieldSet,
+ b: &FieldSet,
+ level: CheckLevel,
+) -> anyhow::Result<()> {
+ if let Err(e) = check_mergeable_fieldsets_inner(a, b, level) {
+ bail!(
+ "Cannot merge fieldsets.\nfirst: {:#?}\nsecond: {:#?}\ncause: {:?}",
+ a,
+ b,
+ e
+ )
+ }
+ Ok(())
+}
+
+pub(crate) fn mergeable_fields(a: &Field, b: &Field, level: CheckLevel) -> bool {
+ let mut res = true;
+ if level >= CheckLevel::Layout {
+ res &= a.bit_size == b.bit_size
+ && a.bit_offset == b.bit_offset
+ && a.enum_read == b.enum_read
+ && a.enum_write == b.enum_write
+ && a.enum_readwrite == b.enum_readwrite
+ && a.array == b.array;
+ }
+ if level >= CheckLevel::Names {
+ res &= a.name == b.name;
+ }
+ if level >= CheckLevel::Descriptions {
+ res &= a.description == b.description;
+ }
+ res
+}
+
+pub(crate) fn check_mergeable_fieldsets_inner(
+ a: &FieldSet,
+ b: &FieldSet,
+ level: CheckLevel,
+) -> anyhow::Result<()> {
+ if a.bit_size != b.bit_size {
+ bail!("Different bit size: {} vs {}", a.bit_size.0, b.bit_size.0)
+ }
+
+ if level >= CheckLevel::Layout {
+ if a.fields.len() != b.fields.len() {
+ bail!("Different field count")
+ }
+
+ let mut aok = [false; 128];
+ let mut bok = [false; 128];
+
+ for (ia, fa) in a.fields.iter().enumerate() {
+ if let Some((ib, _fb)) = b
+ .fields
+ .iter()
+ .enumerate()
+ .find(|(ib, fb)| !bok[*ib] && mergeable_fields(fa, fb, level))
+ {
+ aok[ia] = true;
+ bok[ib] = true;
+ } else {
+ bail!("Field in first fieldset has no match: {:?}", fa);
+ }
+ }
+ }
+
+ Ok(())
+}
+
+pub(crate) fn match_all(set: impl Iterator- , re: ®ex::Regex) -> HashSet {
+ let mut ids: HashSet = HashSet::new();
+ for id in set {
+ if re.is_match(&id) {
+ ids.insert(id);
+ }
+ }
+ ids
+}
+
+pub(crate) fn match_groups(
+ set: impl Iterator
- ,
+ re: ®ex::Regex,
+ to: &str,
+) -> HashMap> {
+ let mut groups: HashMap> = HashMap::new();
+ for s in set {
+ if let Some(to) = match_expand(&s, re, to) {
+ if let Some(v) = groups.get_mut(&to) {
+ v.insert(s);
+ } else {
+ let mut v = HashSet::new();
+ v.insert(s);
+ groups.insert(to, v);
+ }
+ }
+ }
+ groups
+}
+
+pub(crate) fn match_expand(s: &str, regex: ®ex::Regex, res: &str) -> Option {
+ let m = regex.captures(s)?;
+ let mut dst = String::new();
+ m.expand(res, &mut dst);
+ Some(dst)
+}
+
+pub(crate) fn replace_enum_ids(ir: &mut IR, from: &HashSet, to: String) {
+ for (_, fs) in ir.fieldsets.iter_mut() {
+ for f in fs.fields.iter_mut() {
+ for id in [&mut f.enum_read, &mut f.enum_write, &mut f.enum_readwrite]
+ .into_iter()
+ .flatten()
+ {
+ if from.contains(id) {
+ *id = to.clone()
+ }
+ }
+ }
+ }
+}
+
+pub(crate) fn replace_fieldset_ids(ir: &mut IR, from: &HashSet, to: String) {
+ for (_, b) in ir.blocks.iter_mut() {
+ for i in b.items.iter_mut() {
+ if let BlockItemInner::Register(r) = &mut i.inner {
+ if let Some(id) = &r.fieldset {
+ if from.contains(id) {
+ r.fieldset = Some(to.clone())
+ }
+ }
+ }
+ }
+ }
+}
+
+pub(crate) fn replace_block_ids(ir: &mut IR, from: &HashSet, to: String) {
+ for (_, d) in ir.devices.iter_mut() {
+ for p in d.peripherals.iter_mut() {
+ if let Some(block) = &mut p.block {
+ if from.contains(block) {
+ *block = to.clone()
+ }
+ }
+ }
+ }
+
+ for (_, b) in ir.blocks.iter_mut() {
+ for i in b.items.iter_mut() {
+ if let BlockItemInner::Block(bi) = &mut i.inner {
+ if from.contains(&bi.block) {
+ bi.block = to.clone()
+ }
+ }
+ }
+ }
+}
+
+pub(crate) fn calc_array(mut offsets: Vec) -> (u32, Array) {
+ offsets.sort_unstable();
+
+ // Guess stride.
+ let start_offset = offsets[0];
+ let len = offsets.len() as u32;
+ let stride = if len == 1 {
+ // If there's only 1 item, we can't know the stride, but it
+ // doesn't really matter!
+ 0
+ } else {
+ offsets[1] - offsets[0]
+ };
+
+ // Check the stride guess is OK
+
+ if offsets
+ .iter()
+ .enumerate()
+ .all(|(n, &i)| i == start_offset + (n as u32) * stride)
+ {
+ // Array is regular,
+ (
+ start_offset,
+ Array::Regular(RegularArray {
+ len: offsets.len() as _,
+ stride,
+ }),
+ )
+ } else {
+ // Array is irregular,
+ for o in &mut offsets {
+ *o -= start_offset
+ }
+ (start_offset, Array::Cursed(CursedArray { offsets }))
+ }
+}
diff --git a/raltool/src/transform/delete.rs b/raltool/src/transform/delete.rs
new file mode 100644
index 000000000000..f4fde5d24a1f
--- /dev/null
+++ b/raltool/src/transform/delete.rs
@@ -0,0 +1,80 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
/// Transform that deletes every fieldset, enum and block whose id matches the
/// anchored `from` regex, cleaning up all references to them first.
#[derive(Debug, Serialize, Deserialize)]
pub struct Delete {
    // Regex matched against full item ids (anchored by `make_regex`).
    pub from: String,
}
+
+impl Delete {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+
+ let mut ids: HashSet = HashSet::new();
+ for (id, _fs) in ir.fieldsets.iter() {
+ if re.is_match(id) {
+ info!("deleting fieldset {}", id);
+ ids.insert(id.clone());
+ }
+ }
+
+ super::delete_fieldsets::remove_fieldset_ids(ir, &ids);
+
+ for id in ids {
+ ir.fieldsets.remove(&id);
+ }
+
+ let mut ids: HashSet = HashSet::new();
+ for (id, _e) in ir.enums.iter() {
+ if re.is_match(id) {
+ info!("deleting enum {}", id);
+ ids.insert(id.clone());
+ }
+ }
+
+ super::delete_enums::remove_enum_ids(ir, &ids);
+
+ for id in ids {
+ ir.enums.remove(&id);
+ }
+
+ let mut ids: HashSet = HashSet::new();
+ for (id, _b) in ir.blocks.iter() {
+ if re.is_match(id) {
+ info!("deleting block {}", id);
+ ids.insert(id.clone());
+ }
+ }
+
+ remove_block_ids(ir, &ids);
+
+ for id in ids {
+ ir.blocks.remove(&id);
+ }
+
+ Ok(())
+ }
+}
+
+pub(crate) fn remove_block_ids(ir: &mut IR, from: &HashSet) {
+ for (_, b) in ir.blocks.iter_mut() {
+ b.items.retain(|i| {
+ if let BlockItemInner::Block(bi) = &i.inner {
+ !from.contains(&bi.block)
+ } else {
+ true
+ }
+ });
+ }
+
+ for (_, d) in ir.devices.iter_mut() {
+ d.peripherals.retain(|p| match &p.block {
+ Some(block) => !from.contains(block),
+ None => true,
+ });
+ }
+}
diff --git a/raltool/src/transform/delete_enums.rs b/raltool/src/transform/delete_enums.rs
new file mode 100644
index 000000000000..e5334648497a
--- /dev/null
+++ b/raltool/src/transform/delete_enums.rs
@@ -0,0 +1,53 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct DeleteEnums {
+ pub from: String,
+ pub bit_size: Option,
+ #[serde(default)]
+ pub soft: bool,
+}
+
+impl DeleteEnums {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+
+ let mut ids: HashSet = HashSet::new();
+ for (id, e) in ir.enums.iter() {
+ let bit_size_matches = self.bit_size.map_or(true, |s| s == e.bit_size);
+ if re.is_match(id) && bit_size_matches {
+ info!("deleting enum {}", id);
+ ids.insert(id.clone());
+ }
+ }
+
+ remove_enum_ids(ir, &ids);
+
+ if !self.soft {
+ for id in ids {
+ ir.enums.remove(&id);
+ }
+ }
+
+ Ok(())
+ }
+}
+
+pub(crate) fn remove_enum_ids(ir: &mut IR, from: &HashSet) {
+ for (_, fs) in ir.fieldsets.iter_mut() {
+ for f in fs.fields.iter_mut() {
+ for e in [&mut f.enum_read, &mut f.enum_write, &mut f.enum_readwrite].into_iter() {
+ if let Some(id) = e {
+ if from.contains(id) {
+ *e = None
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/raltool/src/transform/delete_fieldsets.rs b/raltool/src/transform/delete_fieldsets.rs
new file mode 100644
index 000000000000..477e39460650
--- /dev/null
+++ b/raltool/src/transform/delete_fieldsets.rs
@@ -0,0 +1,67 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
/// Transform that deletes (or, with `soft`, merely detaches) fieldsets
/// matching `from`.
#[derive(Debug, Serialize, Deserialize)]
pub struct DeleteFieldsets {
    // Regex matched against fieldset ids.
    pub from: String,
    // If true, only delete fieldsets that carry no information (see `is_useless`).
    #[serde(default)]
    pub useless: bool,
    // If true, detach fieldsets from registers but keep the definitions.
    #[serde(default)]
    pub soft: bool,
}
+
+impl DeleteFieldsets {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+
+ let mut ids: HashSet = HashSet::new();
+ for (id, fs) in ir.fieldsets.iter() {
+ if re.is_match(id) && (!self.useless | is_useless(fs)) {
+ info!("deleting fieldset {}", id);
+ ids.insert(id.clone());
+ }
+ }
+
+ remove_fieldset_ids(ir, &ids);
+
+ if !self.soft {
+ for id in ids {
+ ir.fieldsets.remove(&id);
+ }
+ }
+
+ Ok(())
+ }
+}
+
+fn is_useless(fs: &FieldSet) -> bool {
+ match &fs.fields[..] {
+ [] => true,
+ [f] => {
+ fs.bit_size == f.bit_size
+ && f.bit_offset == 0
+ && f.enum_read.is_none()
+ && f.enum_write.is_none()
+ && f.enum_readwrite.is_none()
+ }
+ _ => false,
+ }
+}
+
+pub(crate) fn remove_fieldset_ids(ir: &mut IR, from: &HashSet) {
+ for (_, b) in ir.blocks.iter_mut() {
+ for i in b.items.iter_mut() {
+ if let BlockItemInner::Register(reg) = &mut i.inner {
+ if let Some(id) = ®.fieldset {
+ if from.contains(id) {
+ reg.fieldset = None
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/raltool/src/transform/expand_extends.rs b/raltool/src/transform/expand_extends.rs
new file mode 100644
index 000000000000..137a3afa54af
--- /dev/null
+++ b/raltool/src/transform/expand_extends.rs
@@ -0,0 +1,82 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+
+use crate::ir::*;
+
/// Transform that resolves `extends` links by copying the parent's items into
/// each child, making every block/fieldset self-contained afterwards.
#[derive(Debug, Serialize, Deserialize)]
pub struct ExpandExtends {}

impl ExpandExtends {
    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
        // Expand blocks. Parents are visited before children (topological
        // order), so inherited items propagate through chains of `extends`.
        let deps = ir
            .blocks
            .iter()
            .map(|(k, v)| (k.clone(), v.extends.clone()))
            .collect();
        for name in topological_sort(deps) {
            let block = ir.blocks.get(&name).unwrap();
            if let Some(parent_name) = &block.extends {
                let parent = ir.blocks.get(parent_name).unwrap();

                // Clone first so the shared borrow of `parent` ends before the
                // child is borrowed mutably.
                let items = parent.items.clone();
                let block = ir.blocks.get_mut(&name).unwrap();

                for i in items {
                    // The child's own items take precedence over inherited ones.
                    if !block.items.iter().any(|j| j.name == i.name) {
                        block.items.push(i);
                    }
                }
            }
        }
        // Expand fieldsets (same scheme as blocks above).
        let deps = ir
            .fieldsets
            .iter()
            .map(|(k, v)| (k.clone(), v.extends.clone()))
            .collect();
        for name in topological_sort(deps) {
            let fieldset = ir.fieldsets.get(&name).unwrap();
            if let Some(parent_name) = &fieldset.extends {
                let parent = ir.fieldsets.get(parent_name).unwrap();

                let items = parent.fields.clone();
                let fieldset = ir.fieldsets.get_mut(&name).unwrap();

                for i in items {
                    if !fieldset.fields.iter().any(|j| j.name == i.name) {
                        fieldset.fields.push(i);
                    }
                }
            }
        }

        Ok(())
    }
}
+
/// Orders names so that every entry appears after its (optional) dependency.
///
/// # Panics
/// Panics if the graph contains a cycle or a dependency that is not a key of
/// `vals` — the previous implementation looped forever in that case. Also
/// drops the per-item info-level logging that spammed the output on every run.
fn topological_sort(vals: HashMap<String, Option<String>>) -> Vec<String> {
    let mut done: HashSet<String> = HashSet::new();
    let mut res = Vec::new();
    while done.len() != vals.len() {
        let before = done.len();
        for (name, dep) in &vals {
            if done.contains(name) {
                continue;
            }
            // Defer until the dependency (if any) has been emitted.
            if let Some(dep) = dep {
                if !done.contains(dep) {
                    continue;
                }
            }
            done.insert(name.clone());
            res.push(name.clone());
        }
        // No progress in a full pass means a cycle or a missing dependency.
        assert!(
            done.len() > before,
            "topological_sort: cycle or unknown dependency in extends graph"
        );
    }
    res
}
diff --git a/raltool/src/transform/find_duplicate_enums.rs b/raltool/src/transform/find_duplicate_enums.rs
new file mode 100644
index 000000000000..a16979d037c5
--- /dev/null
+++ b/raltool/src/transform/find_duplicate_enums.rs
@@ -0,0 +1,38 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+
+use super::common::*;
+use crate::ir::*;
+
/// Diagnostic transform: logs groups of enums that look mergeable so a human
/// can add MergeEnums rules. Does not modify the IR; `suggested` only
/// suppresses reporting the same enum in more than one group.
#[derive(Debug, Serialize, Deserialize)]
pub struct FindDuplicateEnums {}
impl FindDuplicateEnums {
    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
        let mut suggested = HashSet::new();

        for (id1, e1) in ir.enums.iter() {
            if suggested.contains(&id1) {
                continue;
            }

            // O(n^2) pairwise scan — acceptable for a one-off diagnostic pass.
            let mut ids = Vec::new();
            for (id2, e2) in ir.enums.iter() {
                // NOTE(review): `mergeable_enums` is not defined in
                // transform::common (only `check_mergeable_enums` taking a
                // CheckLevel) — confirm this resolves, or mirror
                // FindDuplicateFieldsets and use
                // `check_mergeable_enums(e1, e2, CheckLevel::Names).is_ok()`.
                if id1 != id2 && mergeable_enums(e1, e2) {
                    ids.push(id2)
                }
            }

            if !ids.is_empty() {
                ids.push(id1);
                info!("Duplicated enums:");
                for id in ids {
                    suggested.insert(id);
                    // NOTE(review): `.get(id).path` assumes `get` returns the
                    // item directly (not Option) and that it has a `path`
                    // field — verify against the IR map type.
                    info!("  {}", ir.enums.get(id).path);
                }
            }
        }

        Ok(())
    }
}
diff --git a/raltool/src/transform/find_duplicate_fieldsets.rs b/raltool/src/transform/find_duplicate_fieldsets.rs
new file mode 100644
index 000000000000..4d0c2118dacf
--- /dev/null
+++ b/raltool/src/transform/find_duplicate_fieldsets.rs
@@ -0,0 +1,38 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+
+use super::common::*;
+use crate::ir::*;
+
/// Diagnostic transform: logs groups of fieldsets that pass the Names-level
/// mergeability check, so a human can add MergeFieldsets rules. Does not
/// modify the IR; `suggested` only prevents duplicate reports.
#[derive(Debug, Serialize, Deserialize)]
pub struct FindDuplicateFieldsets {}
impl FindDuplicateFieldsets {
    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
        let mut suggested = HashSet::new();

        for (id1, fs1) in ir.fieldsets.iter() {
            if suggested.contains(&id1) {
                continue;
            }

            // O(n^2) pairwise scan — acceptable for a one-off diagnostic pass.
            let mut ids = Vec::new();
            for (id2, fs2) in ir.fieldsets.iter() {
                if id1 != id2 && check_mergeable_fieldsets(fs1, fs2, CheckLevel::Names).is_ok() {
                    ids.push(id2)
                }
            }

            if !ids.is_empty() {
                ids.push(id1);
                info!("Duplicated fieldsets:");
                for id in ids {
                    suggested.insert(id);
                    // NOTE(review): `.get(id).path` assumes `get` returns the
                    // item directly (not Option) and that it has a `path`
                    // field — verify against the IR map type.
                    info!("  {}", ir.fieldsets.get(id).path);
                }
            }
        }

        Ok(())
    }
}
diff --git a/raltool/src/transform/make_block.rs b/raltool/src/transform/make_block.rs
new file mode 100644
index 000000000000..f97cb35cc9e1
--- /dev/null
+++ b/raltool/src/transform/make_block.rs
@@ -0,0 +1,78 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
/// Transform that extracts groups of registers (matched by `from` within
/// blocks matched by `blocks`) into a new nested sub-block.
#[derive(Debug, Serialize, Deserialize)]
pub struct MakeBlock {
    // Regex selecting which blocks to process.
    pub blocks: String,
    // Regex (with capture groups) matching the item names to extract.
    pub from: String,
    // Template for the name of the new block item in the outer block.
    pub to_outer: String,
    // Name of the newly created sub-block definition.
    pub to_block: String,
    // Template for the item names inside the new sub-block.
    pub to_inner: String,
}

impl MakeBlock {
    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
        let path_re = make_regex(&self.blocks)?;
        let re = make_regex(&self.from)?;
        for id in match_all(ir.blocks.keys().cloned(), &path_re) {
            let b = ir.blocks.get_mut(&id).unwrap();
            let groups = match_groups(b.items.iter().map(|f| f.name.clone()), &re, &self.to_outer);
            for (to, group) in groups {
                // Re-fetch the block each iteration: the previous iteration's
                // `ir.blocks.insert` below invalidates the earlier borrow.
                let b = ir.blocks.get_mut(&id).unwrap();
                info!("blockifizing to {}", to);

                // Grab all items into a vec
                let mut items = Vec::new();
                for i in b.items.iter().filter(|i| group.contains(&i.name)) {
                    items.push(i);
                }

                // Sort by offs
                items.sort_by_key(|i| i.byte_offset);
                for i in &items {
                    info!("   {}", i.name);
                }

                // todo check they're mergeable
                // todo check they're not arrays (arrays of arrays not supported)

                // The lowest offset becomes the sub-block's base; inner items
                // are rebased relative to it.
                let byte_offset = items[0].byte_offset;

                let b2 = Block {
                    extends: None,
                    description: None,
                    items: items
                        .iter()
                        .map(|&i| {
                            let mut i = i.clone();
                            i.name = match_expand(&i.name, &re, &self.to_inner).unwrap();
                            i.byte_offset -= byte_offset;
                            i
                        })
                        .collect(),
                };

                // TODO if destination block exists, check mergeable
                let dest = self.to_block.clone(); // todo regex
                ir.blocks.insert(dest.clone(), b2);

                // Remove all items
                let b = ir.blocks.get_mut(&id).unwrap();
                b.items.retain(|i| !group.contains(&i.name));

                // Create the new block item
                b.items.push(BlockItem {
                    name: to,
                    description: None,
                    array: None,
                    byte_offset,
                    inner: BlockItemInner::Block(BlockItemBlock { block: dest }),
                });
            }
        }
        Ok(())
    }
}
diff --git a/raltool/src/transform/make_field_array.rs b/raltool/src/transform/make_field_array.rs
new file mode 100644
index 000000000000..ec9d7df29aeb
--- /dev/null
+++ b/raltool/src/transform/make_field_array.rs
@@ -0,0 +1,62 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MakeFieldArray {
+ pub fieldsets: String,
+ pub from: String,
+ pub to: String,
+ #[serde(default)]
+ pub allow_cursed: bool,
+}
+
+impl MakeFieldArray {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let path_re = make_regex(&self.fieldsets)?;
+ let re = make_regex(&self.from)?;
+ for id in match_all(ir.fieldsets.keys().cloned(), &path_re) {
+ let b = ir.fieldsets.get_mut(&id).unwrap();
+ let groups = match_groups(b.fields.iter().map(|f| f.name.clone()), &re, &self.to);
+ for (to, group) in groups {
+ info!("arrayizing to {}", to);
+
+ // Grab all items into a vec
+ let mut items = Vec::new();
+ for i in b.fields.iter().filter(|i| group.contains(&i.name)) {
+ items.push(i);
+ }
+
+ // todo check they're mergeable
+ // todo check they're not arrays (arrays of arrays not supported)
+
+ // Sort by offs
+ items.sort_by_key(|i| i.bit_offset);
+ for i in &items {
+ info!(" {}", i.name);
+ }
+
+ let (offset, array) = calc_array(items.iter().map(|x| x.bit_offset).collect());
+ if let Array::Cursed(_) = &array {
+ if !self.allow_cursed {
+ panic!("arrayize: items are not evenly spaced. Set `allow_cursed: true` to allow this.")
+ }
+ }
+
+ let mut item = items[0].clone();
+
+ // Remove all
+ b.fields.retain(|i| !group.contains(&i.name));
+
+ // Create the new array item
+ item.name = to;
+ item.array = Some(array);
+ item.bit_offset = offset;
+ b.fields.push(item);
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/raltool/src/transform/make_register_array.rs b/raltool/src/transform/make_register_array.rs
new file mode 100644
index 000000000000..b6f1feaa6ef6
--- /dev/null
+++ b/raltool/src/transform/make_register_array.rs
@@ -0,0 +1,62 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MakeRegisterArray {
+ pub blocks: String,
+ pub from: String,
+ pub to: String,
+ #[serde(default)]
+ pub allow_cursed: bool,
+}
+
+impl MakeRegisterArray {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let path_re = make_regex(&self.blocks)?;
+ let re = make_regex(&self.from)?;
+ for id in match_all(ir.blocks.keys().cloned(), &path_re) {
+ let b = ir.blocks.get_mut(&id).unwrap();
+ let groups = match_groups(b.items.iter().map(|f| f.name.clone()), &re, &self.to);
+ for (to, group) in groups {
+ info!("arrayizing to {}", to);
+
+ // Grab all items into a vec
+ let mut items = Vec::new();
+ for i in b.items.iter().filter(|i| group.contains(&i.name)) {
+ items.push(i);
+ }
+
+ // todo check they're mergeable
+ // todo check they're not arrays (arrays of arrays not supported)
+
+ // Sort by offs
+ items.sort_by_key(|i| i.byte_offset);
+ for i in &items {
+ info!(" {}", i.name);
+ }
+
+ let (offset, array) = calc_array(items.iter().map(|x| x.byte_offset).collect());
+ if let Array::Cursed(_) = &array {
+ if !self.allow_cursed {
+ panic!("arrayize: items are not evenly spaced. Set `allow_cursed: true` to allow this.")
+ }
+ }
+
+ let mut item = items[0].clone();
+
+ // Remove all
+ b.items.retain(|i| !group.contains(&i.name));
+
+ // Create the new array item
+ item.name = to;
+ item.array = Some(array);
+ item.byte_offset = offset;
+ b.items.push(item);
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/raltool/src/transform/merge_blocks.rs b/raltool/src/transform/merge_blocks.rs
new file mode 100644
index 000000000000..f59bbd01bec0
--- /dev/null
+++ b/raltool/src/transform/merge_blocks.rs
@@ -0,0 +1,66 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MergeBlocks {
+ pub from: String,
+ pub to: String,
+ pub main: Option,
+ #[serde(default)]
+ pub check: CheckLevel,
+}
+
+impl MergeBlocks {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+ let groups = match_groups(ir.blocks.keys().cloned(), &re, &self.to);
+
+ for (to, group) in groups {
+ info!("Merging blocks, dest: {}", to);
+ for id in &group {
+ info!(" {}", id);
+ }
+ self.merge_blocks(ir, group, to, self.main.as_ref())?;
+ }
+
+ Ok(())
+ }
+
+ fn merge_blocks(
+ &self,
+ ir: &mut IR,
+ ids: HashSet,
+ to: String,
+ main: Option<&String>,
+ ) -> anyhow::Result<()> {
+ let mut main_id = ids.iter().next().unwrap().clone();
+ if let Some(main) = main {
+ let re = make_regex(main)?;
+ for id in ids.iter() {
+ if re.is_match(id) {
+ main_id = id.clone();
+ break;
+ }
+ }
+ }
+ let b = ir.blocks.get(&main_id).unwrap().clone();
+
+ // todo
+ //for id in &ids {
+ // let b2 = ir.blocks.get(id).unwrap();
+ // check_mergeable_blocks(&b, b2, self.check)?;
+ //}
+
+ replace_block_ids(ir, &ids, to.clone());
+ for id in &ids {
+ ir.blocks.remove(id);
+ }
+ ir.blocks.insert(to, b);
+
+ Ok(())
+ }
+}
diff --git a/raltool/src/transform/merge_enums.rs b/raltool/src/transform/merge_enums.rs
new file mode 100644
index 000000000000..bb0bcaecf912
--- /dev/null
+++ b/raltool/src/transform/merge_enums.rs
@@ -0,0 +1,57 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MergeEnums {
+ pub from: String,
+ pub to: String,
+ #[serde(default)]
+ pub check: CheckLevel,
+ #[serde(default)]
+ pub skip_unmergeable: bool,
+}
+
+impl MergeEnums {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+ let groups = match_groups(ir.enums.keys().cloned(), &re, &self.to);
+
+ for (to, group) in groups {
+ info!("Merging enums, dest: {}", to);
+ for id in &group {
+ info!(" {}", id);
+ }
+ self.merge_enums(ir, group, to)?;
+ }
+
+ Ok(())
+ }
+
+ fn merge_enums(&self, ir: &mut IR, ids: HashSet, to: String) -> anyhow::Result<()> {
+ let e = ir.enums.get(ids.iter().next().unwrap()).unwrap().clone();
+
+ for id in &ids {
+ let e2 = ir.enums.get(id).unwrap();
+ if let Err(e) = check_mergeable_enums(&e, e2, self.check) {
+ if self.skip_unmergeable {
+ info!("skipping: {:?}", to);
+ return Ok(());
+ } else {
+ return Err(e);
+ }
+ }
+ }
+ for id in &ids {
+ ir.enums.remove(id);
+ }
+
+ assert!(ir.enums.insert(to.clone(), e).is_none());
+ replace_enum_ids(ir, &ids, to);
+
+ Ok(())
+ }
+}
diff --git a/raltool/src/transform/merge_fieldsets.rs b/raltool/src/transform/merge_fieldsets.rs
new file mode 100644
index 000000000000..6e1cbfa10669
--- /dev/null
+++ b/raltool/src/transform/merge_fieldsets.rs
@@ -0,0 +1,66 @@
+use log::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+use super::common::*;
+use crate::ir::*;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MergeFieldsets {
+ pub from: String,
+ pub to: String,
+ pub main: Option,
+ #[serde(default)]
+ pub check: CheckLevel,
+}
+
+impl MergeFieldsets {
+ pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+ let re = make_regex(&self.from)?;
+ let groups = match_groups(ir.fieldsets.keys().cloned(), &re, &self.to);
+
+ for (to, group) in groups {
+ info!("Merging fieldsets, dest: {}", to);
+ for id in &group {
+ info!(" {}", id);
+ }
+ self.merge_fieldsets(ir, group, to, self.main.as_ref())?;
+ }
+
+ Ok(())
+ }
+
+ fn merge_fieldsets(
+ &self,
+ ir: &mut IR,
+ ids: HashSet,
+ to: String,
+ main: Option<&String>,
+ ) -> anyhow::Result<()> {
+ let mut main_id = ids.iter().next().unwrap().clone();
+ if let Some(main) = main {
+ let re = make_regex(main)?;
+ for id in ids.iter() {
+ if re.is_match(id) {
+ main_id = id.clone();
+ break;
+ }
+ }
+ }
+ let fs = ir.fieldsets.get(&main_id).unwrap().clone();
+
+ for id in &ids {
+ let fs2 = ir.fieldsets.get(id).unwrap();
+ check_mergeable_fieldsets(&fs, fs2, self.check)?;
+ }
+
+ for id in &ids {
+ ir.fieldsets.remove(id);
+ }
+
+ assert!(ir.fieldsets.insert(to.clone(), fs).is_none());
+ replace_fieldset_ids(ir, &ids, to);
+
+ Ok(())
+ }
+}
diff --git a/raltool/src/transform/mod.rs b/raltool/src/transform/mod.rs
new file mode 100644
index 000000000000..96011440b453
--- /dev/null
+++ b/raltool/src/transform/mod.rs
@@ -0,0 +1,324 @@
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+
+use crate::ir::*;
+use crate::util::{ToSanitizedPascalCase, ToSanitizedSnakeCase, ToSanitizedUpperCase};
+
+/// Transform that rewrites every name in the IR into a valid Rust
+/// identifier of the conventional case for its kind.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Sanitize {}
+
+impl Sanitize {
+    /// Path-like names (device/block/fieldset/enum) get snake_case
+    /// modules with a PascalCase leaf; all other names are upper-cased.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        map_names(ir, |k, p| match k {
+            NameKind::Device => *p = sanitize_path(p),
+            NameKind::DevicePeripheral => *p = p.to_sanitized_upper_case().to_string(),
+            NameKind::DeviceInterrupt => *p = p.to_sanitized_upper_case().to_string(),
+            NameKind::Block => *p = sanitize_path(p),
+            NameKind::Fieldset => *p = sanitize_path(p),
+            NameKind::Enum => *p = sanitize_path(p),
+            NameKind::BlockItem => *p = p.to_sanitized_upper_case().to_string(),
+            NameKind::Field => *p = p.to_sanitized_upper_case().to_string(),
+            NameKind::EnumVariant => *p = p.to_sanitized_upper_case().to_string(),
+        });
+        Ok(())
+    }
+}
+
+/// Category of IR name handed to the `map_names` visitor, so callers can
+/// apply a different renaming policy per kind.
+#[derive(PartialEq, Eq)]
+pub enum NameKind {
+    Device,
+    DevicePeripheral,
+    DeviceInterrupt,
+    Block,
+    BlockItem,
+    Fieldset,
+    Field,
+    Enum,
+    EnumVariant,
+}
+
+/// Apply `f` to the contained string if present; no-op on `None`.
+fn rename_opt(s: &mut Option<String>, f: impl Fn(&mut String)) {
+    if let Some(s) = s {
+        f(s)
+    }
+}
+
+/// Rename every block, keeping all references consistent: peripheral
+/// `block` paths and nested block items are updated too.
+pub fn map_block_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    remap_names(&mut ir.blocks, &f);
+
+    for device in ir.devices.values_mut() {
+        for peripheral in device.peripherals.iter_mut() {
+            rename_opt(&mut peripheral.block, &f);
+        }
+    }
+
+    for block in ir.blocks.values_mut() {
+        for item in block.items.iter_mut() {
+            // Only nested-block items carry a block path; registers don't.
+            if let BlockItemInner::Block(inner) = &mut item.inner {
+                f(&mut inner.block)
+            }
+        }
+    }
+}
+
+/// Rename every fieldset, updating register references to fieldsets.
+pub fn map_fieldset_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    remap_names(&mut ir.fieldsets, &f);
+
+    for block in ir.blocks.values_mut() {
+        for item in block.items.iter_mut() {
+            // Only register items reference a fieldset.
+            if let BlockItemInner::Register(reg) = &mut item.inner {
+                rename_opt(&mut reg.fieldset, &f);
+            }
+        }
+    }
+}
+
+/// Rename every enum, updating the read/write/readwrite enum references
+/// held by fields.
+pub fn map_enum_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    remap_names(&mut ir.enums, &f);
+
+    for fieldset in ir.fieldsets.values_mut() {
+        for field in fieldset.fields.iter_mut() {
+            rename_opt(&mut field.enum_read, &f);
+            rename_opt(&mut field.enum_write, &f);
+            rename_opt(&mut field.enum_readwrite, &f);
+        }
+    }
+}
+
+/// Rename every device in the IR. Nothing else references device names,
+/// so only the map keys need rewriting.
+pub fn map_device_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    remap_names(&mut ir.devices, &f);
+}
+
+/// Rename every interrupt of every device.
+pub fn map_device_interrupt_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    for device in ir.devices.values_mut() {
+        for interrupt in device.interrupts.iter_mut() {
+            f(&mut interrupt.name);
+        }
+    }
+}
+
+/// Rename every peripheral instance of every device.
+pub fn map_device_peripheral_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    for device in ir.devices.values_mut() {
+        for peripheral in device.peripherals.iter_mut() {
+            f(&mut peripheral.name);
+        }
+    }
+}
+
+/// Rename every item (register or nested block) of every block.
+pub fn map_block_item_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    for block in ir.blocks.values_mut() {
+        for item in block.items.iter_mut() {
+            f(&mut item.name)
+        }
+    }
+}
+
+/// Rename every field of every fieldset.
+pub fn map_field_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    for fieldset in ir.fieldsets.values_mut() {
+        for field in fieldset.fields.iter_mut() {
+            f(&mut field.name)
+        }
+    }
+}
+
+/// Rename every variant of every enum.
+pub fn map_enum_variant_names(ir: &mut IR, f: impl Fn(&mut String)) {
+    for enumm in ir.enums.values_mut() {
+        for variant in enumm.variants.iter_mut() {
+            f(&mut variant.name)
+        }
+    }
+}
+
+/// Visit every name in the IR, tagging each with its `NameKind` so the
+/// callback can apply a per-kind policy.
+pub fn map_names(ir: &mut IR, f: impl Fn(NameKind, &mut String)) {
+    map_device_names(ir, |s| f(NameKind::Device, s));
+    map_device_peripheral_names(ir, |s| f(NameKind::DevicePeripheral, s));
+    map_device_interrupt_names(ir, |s| f(NameKind::DeviceInterrupt, s));
+    map_block_names(ir, |s| f(NameKind::Block, s));
+    map_block_item_names(ir, |s| f(NameKind::BlockItem, s));
+    map_fieldset_names(ir, |s| f(NameKind::Fieldset, s));
+    map_field_names(ir, |s| f(NameKind::Field, s));
+    map_enum_names(ir, |s| f(NameKind::Enum, s));
+    map_enum_variant_names(ir, |s| f(NameKind::EnumVariant, s));
+}
+
+/// Rewrite every description string in the IR (blocks, block items,
+/// fieldsets, fields, enums, enum variants) through `ff`.
+pub fn map_descriptions(ir: &mut IR, mut ff: impl FnMut(&str) -> String) -> anyhow::Result<()> {
+    // Maps an optional description in place; `None` stays `None`.
+    let mut mapit = |d: &mut Option<String>| {
+        *d = d.as_ref().map(|p| ff(p));
+    };
+
+    for (_, b) in ir.blocks.iter_mut() {
+        mapit(&mut b.description);
+        for i in b.items.iter_mut() {
+            mapit(&mut i.description);
+        }
+    }
+
+    for (_, fs) in ir.fieldsets.iter_mut() {
+        mapit(&mut fs.description);
+        for f in fs.fields.iter_mut() {
+            mapit(&mut f.description);
+        }
+    }
+
+    for (_, e) in ir.enums.iter_mut() {
+        mapit(&mut e.description);
+        for v in e.variants.iter_mut() {
+            mapit(&mut v.description);
+        }
+    }
+
+    Ok(())
+}
+
+/// Rebuild the map with every key passed through `f`.
+///
+/// The assert panics if two renamed keys collide, since that would
+/// silently drop an entry.
+fn remap_names<T>(x: &mut HashMap<String, T>, f: impl Fn(&mut String)) {
+    let mut res = HashMap::new();
+    for (mut name, val) in x.drain() {
+        f(&mut name);
+        assert!(res.insert(name, val).is_none())
+    }
+    *x = res
+}
+
+/// Sanitize a `::`-separated path: the final segment becomes PascalCase
+/// (a type name), every leading segment becomes snake_case (module names).
+fn sanitize_path(p: &str) -> String {
+    let v = p.split("::").collect::<Vec<_>>();
+    let len = v.len();
+    v.into_iter()
+        .enumerate()
+        .map(|(i, s)| {
+            if i == len - 1 {
+                s.to_sanitized_pascal_case()
+            } else {
+                s.to_sanitized_snake_case()
+            }
+        })
+        .collect::<Vec<_>>()
+        .join("::")
+}
+
+mod common;
+
+pub mod delete;
+pub mod delete_enums;
+pub mod delete_fieldsets;
+//pub mod find_duplicate_enums;
+//pub mod find_duplicate_fieldsets;
+pub mod expand_extends;
+pub mod make_block;
+pub mod make_field_array;
+pub mod make_register_array;
+pub mod merge_blocks;
+pub mod merge_enums;
+pub mod merge_fieldsets;
+pub mod modify_byte_offset;
+pub mod rename;
+pub mod rename_fields;
+pub mod rename_registers;
+pub mod sort;
+
+/// One step of a transform pipeline, deserialized from configuration.
+/// Each variant wraps the concrete transform's parameters.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum Transform {
+    Sanitize(Sanitize),
+    Sort(sort::Sort),
+    Delete(delete::Delete),
+    DeleteEnums(delete_enums::DeleteEnums),
+    DeleteFieldsets(delete_fieldsets::DeleteFieldsets),
+    MergeBlocks(merge_blocks::MergeBlocks),
+    MergeEnums(merge_enums::MergeEnums),
+    MergeFieldsets(merge_fieldsets::MergeFieldsets),
+    Rename(rename::Rename),
+    RenameFields(rename_fields::RenameFields),
+    RenameRegisters(rename_registers::RenameRegisters),
+    MakeRegisterArray(make_register_array::MakeRegisterArray),
+    MakeFieldArray(make_field_array::MakeFieldArray),
+    MakeBlock(make_block::MakeBlock),
+    ModifyByteOffset(modify_byte_offset::ModifyByteOffset),
+    //FindDuplicateEnums(find_duplicate_enums::FindDuplicateEnums),
+    //FindDuplicateFieldsets(find_duplicate_fieldsets::FindDuplicateFieldsets),
+}
+
+impl Transform {
+    /// Dispatch to the wrapped transform's `run`, mutating `ir` in place.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        match self {
+            Self::Sanitize(t) => t.run(ir),
+            Self::Sort(t) => t.run(ir),
+            Self::Delete(t) => t.run(ir),
+            Self::DeleteEnums(t) => t.run(ir),
+            Self::DeleteFieldsets(t) => t.run(ir),
+            Self::MergeBlocks(t) => t.run(ir),
+            Self::MergeEnums(t) => t.run(ir),
+            Self::MergeFieldsets(t) => t.run(ir),
+            Self::Rename(t) => t.run(ir),
+            Self::RenameFields(t) => t.run(ir),
+            Self::RenameRegisters(t) => t.run(ir),
+            Self::MakeRegisterArray(t) => t.run(ir),
+            Self::MakeFieldArray(t) => t.run(ir),
+            Self::MakeBlock(t) => t.run(ir),
+            Self::ModifyByteOffset(t) => t.run(ir),
+            //Self::FindDuplicateEnums(t) => t.run(ir),
+            //Self::FindDuplicateFieldsets(t) => t.run(ir),
+        }
+    }
+}
+
+/// A transform that removes extraneous numbers
+/// from block paths that have multiple instances.
+///
+/// If the IR uses paths that look like
+///
+/// - `lpuart1::Lpuart2`
+/// - `lpuart1::Lpuart7`
+/// - etc.
+///
+/// this transformer changes `lpuart1` to `lpuart`,
+/// dropping the '1'.
+pub struct SimplifyPaths(());
+impl SimplifyPaths {
+    pub fn new() -> Self {
+        SimplifyPaths(())
+    }
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        // Matches a trailing run of digits, e.g. the "1" in "lpuart1".
+        let re = regex::Regex::new(r"\d+$")?;
+
+        // Count how many peripherals reference each block path.
+        // NOTE(review): assumes every peripheral has a block path; the
+        // unwrap panics otherwise — confirm upstream guarantees this.
+        let mut block_to_peripherals: HashMap<&str, usize> = HashMap::new();
+        for device in ir.devices.values() {
+            for peripheral in &device.peripherals {
+                *block_to_peripherals
+                    .entry(peripheral.block.as_ref().unwrap())
+                    .or_insert(0) += 1;
+            }
+        }
+
+        // Roots to strip: shared by more than one peripheral, and ending
+        // in digits.
+        let renames: HashSet<String> = block_to_peripherals
+            .into_iter()
+            .filter(|(_, count)| *count > 1)
+            .filter(|(path, _)| {
+                let root = path.split("::").next().unwrap();
+                re.is_match(root)
+            })
+            .map(|(path, _)| path.split("::").next().unwrap().into())
+            .collect();
+
+        map_names(ir, |_, name| {
+            let mut parts = name.split("::");
+            if let Some(root) = parts.next() {
+                if renames.contains(root) {
+                    let new_root = re.replace(root, "");
+                    *name = std::iter::once(&*new_root)
+                        .chain(parts)
+                        .collect::<Vec<_>>()
+                        .join("::");
+                }
+            }
+        });
+
+        Ok(())
+    }
+}
+
+impl Default for SimplifyPaths {
+    // Defers to `new`; the transform carries no configuration.
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/raltool/src/transform/modify_byte_offset.rs b/raltool/src/transform/modify_byte_offset.rs
new file mode 100644
index 000000000000..a9ed456f340c
--- /dev/null
+++ b/raltool/src/transform/modify_byte_offset.rs
@@ -0,0 +1,23 @@
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+/// Transform that adds a constant to the byte offset of every item in
+/// matching blocks.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ModifyByteOffset {
+    /// Regex selecting the blocks to modify.
+    pub block: String,
+    /// Offset in bytes added to every item of each matched block.
+    pub add_offset: u32,
+}
+
+impl ModifyByteOffset {
+    /// Shift the byte offset of every item in every block matching `block`.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        let path_re = make_regex(&self.block)?;
+        for id in match_all(ir.blocks.keys().cloned(), &path_re) {
+            let block = ir.blocks.get_mut(&id).unwrap();
+            for item in block.items.iter_mut() {
+                item.byte_offset += self.add_offset;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/raltool/src/transform/rename.rs b/raltool/src/transform/rename.rs
new file mode 100644
index 000000000000..b92884d67785
--- /dev/null
+++ b/raltool/src/transform/rename.rs
@@ -0,0 +1,30 @@
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+/// Transform that renames devices, blocks, fieldsets, enums, and device
+/// peripherals whose name matches `from`.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Rename {
+    /// Regex matched against existing names.
+    pub from: String,
+    /// Replacement template; may reference capture groups from `from`.
+    pub to: String,
+}
+
+impl Rename {
+    /// Rename all top-level names matching `from`, expanding into `to`.
+    /// Block items, fields, and enum variants are deliberately not
+    /// visited — only path-level names change.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        let re = make_regex(&self.from)?;
+
+        // Only rewrite names that actually match; others stay intact.
+        let renamer = |name: &mut String| {
+            if let Some(res) = match_expand(name, &re, &self.to) {
+                *name = res
+            }
+        };
+
+        super::map_device_names(ir, &renamer);
+        super::map_block_names(ir, &renamer);
+        super::map_fieldset_names(ir, &renamer);
+        super::map_enum_names(ir, &renamer);
+        super::map_device_peripheral_names(ir, &renamer);
+
+        Ok(())
+    }
+}
diff --git a/raltool/src/transform/rename_fields.rs b/raltool/src/transform/rename_fields.rs
new file mode 100644
index 000000000000..df76c155b200
--- /dev/null
+++ b/raltool/src/transform/rename_fields.rs
@@ -0,0 +1,27 @@
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+/// Transform that renames fields inside matching fieldsets.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct RenameFields {
+    /// Regex selecting the fieldsets whose fields are renamed.
+    pub fieldset: String,
+    /// Regex matched against field names.
+    pub from: String,
+    /// Replacement template; may reference capture groups from `from`.
+    pub to: String,
+}
+
+impl RenameFields {
+    /// For every fieldset matching `fieldset`, rename each field that
+    /// matches `from` according to the `to` template.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        let path_re = make_regex(&self.fieldset)?;
+        let re = make_regex(&self.from)?;
+        for id in match_all(ir.fieldsets.keys().cloned(), &path_re) {
+            let fieldset = ir.fieldsets.get_mut(&id).unwrap();
+            for field in fieldset.fields.iter_mut() {
+                if let Some(renamed) = match_expand(&field.name, &re, &self.to) {
+                    field.name = renamed;
+                }
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/raltool/src/transform/rename_registers.rs b/raltool/src/transform/rename_registers.rs
new file mode 100644
index 000000000000..5fa0f1e3c10b
--- /dev/null
+++ b/raltool/src/transform/rename_registers.rs
@@ -0,0 +1,27 @@
+use serde::{Deserialize, Serialize};
+
+use super::common::*;
+use crate::ir::*;
+
+/// Transform that renames items (registers) inside matching blocks.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct RenameRegisters {
+    /// Regex selecting the blocks whose items are renamed.
+    pub block: String,
+    /// Regex matched against item names.
+    pub from: String,
+    /// Replacement template; may reference capture groups from `from`.
+    pub to: String,
+}
+
+impl RenameRegisters {
+    /// For every block matching `block`, rename each item that matches
+    /// `from` according to the `to` template.
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        let path_re = make_regex(&self.block)?;
+        let re = make_regex(&self.from)?;
+        for id in match_all(ir.blocks.keys().cloned(), &path_re) {
+            let block = ir.blocks.get_mut(&id).unwrap();
+            for item in block.items.iter_mut() {
+                if let Some(renamed) = match_expand(&item.name, &re, &self.to) {
+                    item.name = renamed;
+                }
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/raltool/src/transform/sort.rs b/raltool/src/transform/sort.rs
new file mode 100644
index 000000000000..03173ec11c38
--- /dev/null
+++ b/raltool/src/transform/sort.rs
@@ -0,0 +1,22 @@
+use serde::{Deserialize, Serialize};
+
+use crate::ir::*;
+
+/// Transform that puts IR contents in a deterministic order: block items
+/// by (byte offset, name), fields by (bit offset, name), enum variants
+/// by (value, name).
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Sort {}
+
+impl Sort {
+    pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> {
+        for block in ir.blocks.values_mut() {
+            block.items.sort_by_key(|item| (item.byte_offset, item.name.clone()))
+        }
+        for fieldset in ir.fieldsets.values_mut() {
+            fieldset.fields.sort_by_key(|field| (field.bit_offset, field.name.clone()))
+        }
+        for enumm in ir.enums.values_mut() {
+            enumm.variants.sort_by_key(|variant| (variant.value, variant.name.clone()))
+        }
+
+        Ok(())
+    }
+}
diff --git a/raltool/src/util.rs b/raltool/src/util.rs
new file mode 100644
index 000000000000..c0a89a914f9f
--- /dev/null
+++ b/raltool/src/util.rs
@@ -0,0 +1,324 @@
+use anyhow::{anyhow, Result};
+use inflections::Inflect;
+use proc_macro2::{Ident, Literal, Span, TokenStream};
+use quote::{quote, ToTokens};
+use std::{borrow::Cow, str::FromStr};
+
+/// Number of bits per byte, for converting between bit and byte sizes.
+pub const BITS_PER_BYTE: u32 = 8;
+
+/// List of chars that some vendors use in their peripheral/field names but
+/// that are not valid in Rust ident
+const BLACKLIST_CHARS: &[char] = &['(', ')', '[', ']', '/', ' ', '-'];
+
+/// Convert to PascalCase, sanitized to be a valid Rust identifier.
+pub trait ToSanitizedPascalCase {
+    fn to_sanitized_pascal_case(&self) -> Cow<str>;
+}
+
+/// Convert to UPPERCASE, sanitized to be a valid Rust identifier.
+pub trait ToSanitizedUpperCase {
+    fn to_sanitized_upper_case(&self) -> Cow<str>;
+}
+
+/// Convert to snake_case, sanitized to be a valid Rust identifier.
+pub trait ToSanitizedSnakeCase {
+    fn to_sanitized_snake_case(&self) -> Cow<str>;
+}
+
+impl ToSanitizedSnakeCase for str {
+    /// Snake-case `self`: strips blacklisted characters, prefixes a
+    /// leading digit with `_`, and appends `_` to Rust keywords (plus a
+    /// few register-API method names that would otherwise collide).
+    fn to_sanitized_snake_case(&self) -> Cow<str> {
+        // Expands to a match mapping each keyword (case-insensitively)
+        // to "keyword_", falling through to plain snake_case.
+        macro_rules! keywords {
+            ($s:expr, $($kw:ident),+,) => {
+                Cow::from(match &$s.to_lowercase()[..] {
+                    $(stringify!($kw) => concat!(stringify!($kw), "_")),+,
+                    _ => return Cow::from($s.to_snake_case())
+                })
+            }
+        }
+
+        let s = self.replace(BLACKLIST_CHARS, "");
+
+        match s.chars().next().unwrap_or('\0') {
+            '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => {
+                Cow::from(format!("_{}", s.to_snake_case()))
+            }
+            _ => {
+                keywords! {
+                    s,
+                    abstract,
+                    alignof,
+                    as,
+                    async,
+                    await,
+                    become,
+                    box,
+                    break,
+                    const,
+                    continue,
+                    crate,
+                    do,
+                    else,
+                    enum,
+                    extern,
+                    false,
+                    final,
+                    fn,
+                    for,
+                    if,
+                    impl,
+                    in,
+                    let,
+                    loop,
+                    macro,
+                    match,
+                    mod,
+                    move,
+                    mut,
+                    offsetof,
+                    override,
+                    priv,
+                    proc,
+                    pub,
+                    pure,
+                    ref,
+                    return,
+                    self,
+                    sizeof,
+                    static,
+                    struct,
+                    super,
+                    trait,
+                    true,
+                    try,
+                    type,
+                    typeof,
+                    unsafe,
+                    unsized,
+                    use,
+                    virtual,
+                    where,
+                    while,
+                    yield,
+                    set_bit,
+                    clear_bit,
+                    bit,
+                    bits,
+                }
+            }
+        }
+    }
+}
+
+impl ToSanitizedUpperCase for str {
+    /// Upper-case `self`, stripping blacklisted characters and prefixing
+    /// a leading digit with `_` so the result is a valid identifier.
+    fn to_sanitized_upper_case(&self) -> Cow<str> {
+        let s = self.replace(BLACKLIST_CHARS, "");
+
+        match s.chars().next().unwrap_or('\0') {
+            '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => {
+                Cow::from(format!("_{}", s.to_upper_case()))
+            }
+            _ => Cow::from(s.to_upper_case()),
+        }
+    }
+}
+
+impl ToSanitizedPascalCase for str {
+    /// Pascal-case `self`, stripping blacklisted characters and prefixing
+    /// a leading digit with `_` so the result is a valid identifier.
+    fn to_sanitized_pascal_case(&self) -> Cow<str> {
+        let s = self.replace(BLACKLIST_CHARS, "");
+
+        match s.chars().next().unwrap_or('\0') {
+            '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => {
+                Cow::from(format!("_{}", s.to_pascal_case()))
+            }
+            _ => Cow::from(s.to_pascal_case()),
+        }
+    }
+}
+
+/// Collapse every whitespace run (including newlines) to a single space.
+pub fn respace(s: &str) -> String {
+    s.split_whitespace().collect::<Vec<_>>().join(" ")
+}
+
+/// Backslash-escape unescaped `[` and `]` so rustdoc doesn't parse them
+/// as intra-doc links; brackets already preceded by `\` are left alone.
+pub fn escape_brackets(s: &str) -> String {
+    // Re-join the '['-split pieces; an accumulator ending in '\' means
+    // the bracket was already escaped in the input.
+    s.split('[')
+        .fold("".to_string(), |acc, x| {
+            if acc.is_empty() {
+                x.to_string()
+            } else if acc.ends_with('\\') {
+                acc + "[" + x
+            } else {
+                acc + "\\[" + x
+            }
+        })
+        // Same pass again for ']'.
+        .split(']')
+        .fold("".to_string(), |acc, x| {
+            if acc.is_empty() {
+                x.to_string()
+            } else if acc.ends_with('\\') {
+                acc + "]" + x
+            } else {
+                acc + "\\]" + x
+            }
+        })
+}
+
+/// Substitute the SVD `%s` placeholder with `suffix`, preferring the
+/// array form `[%s]` when present so the brackets are consumed too.
+pub fn replace_suffix(name: &str, suffix: &str) -> String {
+    let placeholder = if name.contains("[%s]") { "[%s]" } else { "%s" };
+    name.replace(placeholder, suffix)
+}
+
+/// Turns `n` into an unsuffixed hex token with `_` separators every 16
+/// bits. Leading all-zero 16-bit groups are dropped, values fitting in
+/// one byte print as two hex digits, and zero prints as `0`.
+pub fn hex(n: u64) -> TokenStream {
+    // Split n into four 16-bit groups, most significant first.
+    let (h4, h3, h2, h1) = (
+        (n >> 48) & 0xffff,
+        (n >> 32) & 0xffff,
+        (n >> 16) & 0xffff,
+        n & 0xffff,
+    );
+    TokenStream::from_str(
+        &(if h4 != 0 {
+            format!("0x{:04x}_{:04x}_{:04x}_{:04x}", h4, h3, h2, h1)
+        } else if h3 != 0 {
+            format!("0x{:04x}_{:04x}_{:04x}", h3, h2, h1)
+        } else if h2 != 0 {
+            format!("0x{:04x}_{:04x}", h2, h1)
+        } else if h1 & 0xff00 != 0 {
+            format!("0x{:04x}", h1)
+        } else if h1 != 0 {
+            format!("0x{:02x}", h1 & 0xff)
+        } else {
+            "0".to_string()
+        }),
+    )
+    .unwrap()
+}
+
+/// Turns `n` into an unsuffixed decimal literal token.
+pub fn unsuffixed(n: u64) -> TokenStream {
+    Literal::u64_unsuffixed(n).into_token_stream()
+}
+
+/// Emit `true`/`false` for 1-bit values, otherwise an unsuffixed integer.
+pub fn unsuffixed_or_bool(n: u64, width: u32) -> TokenStream {
+    if width != 1 {
+        return unsuffixed(n);
+    }
+    let name = if n == 0 { "false" } else { "true" };
+    Ident::new(name, Span::call_site()).into_token_stream()
+}
+
+/// Helpers mapping a field/register bit width to a Rust type.
+pub trait U32Ext {
+    /// Smallest Rust integral (or `bool`) type holding `self` bits.
+    fn to_ty(&self) -> Result<Ident>;
+    /// Bit width of the type `to_ty` would return (1 for `bool`).
+    fn to_ty_width(&self) -> Result<u32>;
+}
+
+impl U32Ext for u32 {
+    /// Map a bit width in 1..=64 to the smallest fitting Rust type name;
+    /// width 1 maps to `bool`. Errors for 0 or widths above 64.
+    fn to_ty(&self) -> Result<Ident> {
+        Ok(Ident::new(
+            match *self {
+                1 => "bool",
+                2..=8 => "u8",
+                9..=16 => "u16",
+                17..=32 => "u32",
+                33..=64 => "u64",
+                _ => {
+                    return Err(anyhow!(
+                        "can't convert {} bits into a Rust integral type",
+                        *self
+                    ))
+                }
+            },
+            Span::call_site(),
+        ))
+    }
+
+    /// Map a bit width in 1..=64 to the width of the type `to_ty` returns.
+    fn to_ty_width(&self) -> Result<u32> {
+        Ok(match *self {
+            1 => 1,
+            2..=8 => 8,
+            9..=16 => 16,
+            17..=32 => 32,
+            33..=64 => 64,
+            _ => {
+                return Err(anyhow!(
+                    "can't convert {} bits into a Rust integral type width",
+                    *self
+                ))
+            }
+        })
+    }
+}
+
+/// Generate the contents of a `build.rs` for the generated crate: when
+/// the `rt` feature is enabled it copies `device.x` into OUT_DIR and adds
+/// it to the linker search path.
+pub fn build_rs() -> TokenStream {
+    quote! {
+        use std::env;
+        use std::fs::File;
+        use std::io::Write;
+        use std::path::PathBuf;
+
+        fn main() {
+            if env::var_os("CARGO_FEATURE_RT").is_some() {
+                // Put the linker script somewhere the linker can find it
+                let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
+                File::create(out.join("device.x"))
+                    .unwrap()
+                    .write_all(include_bytes!("device.x"))
+                    .unwrap();
+                println!("cargo:rustc-link-search={}", out.display());
+
+                println!("cargo:rerun-if-changed=device.x");
+            }
+
+            println!("cargo:rerun-if-changed=build.rs");
+        }
+    }
+}
+/// Return a relative token path that reaches item `a` from inside the
+/// module containing item `b` (both given as `::`-separated paths).
+pub fn relative_path(a: &str, b: &str) -> TokenStream {
+    let a: Vec<&str> = a.split("::").collect();
+    let b: Vec<&str> = b.split("::").collect();
+
+    // Strip the common module prefix of the two parent paths.
+    let mut ma = &a[..a.len() - 1];
+    let mut mb = &b[..b.len() - 1];
+    while !ma.is_empty() && !mb.is_empty() && ma[0] == mb[0] {
+        ma = &ma[1..];
+        mb = &mb[1..];
+    }
+
+    let mut res = TokenStream::new();
+
+    // for each module left in b's parent, climb one level with `super`
+    for _ in mb {
+        res.extend(quote!(super::));
+    }
+
+    // then descend through a's remaining parent modules
+    for ident in ma {
+        let ident = Ident::new(ident, Span::call_site());
+        res.extend(quote!(#ident::));
+    }
+
+    // finally the item itself
+    let ident = Ident::new(a[a.len() - 1], Span::call_site());
+    res.extend(quote!(#ident));
+
+    res
+}
+
+/// Turn a `::`-separated path into a token path rooted at `crate`.
+pub fn absolute_path(path: &str) -> TokenStream {
+    path.split("::")
+        .map(|part| Ident::new(part, Span::call_site()))
+        .fold(quote!(crate), |mut path, ident| {
+            path.extend(quote!(::#ident));
+            path
+        })
+}
+
+/// Render an optional description as a `#[doc = "..."]` attribute,
+/// unescaping literal `\n`, collapsing whitespace, and escaping brackets
+/// so rustdoc doesn't treat them as links. `None` yields an empty stream.
+pub fn doc(doc: &Option<String>) -> TokenStream {
+    if let Some(doc) = doc {
+        let doc = doc.replace("\\n", "\n");
+        let doc = respace(&doc);
+        let doc = escape_brackets(&doc);
+        quote!(#[doc=#doc])
+    } else {
+        quote!()
+    }
+}