From 0ceb38148dd666895640afd1fc4e6ae265f7676c Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Fri, 4 Nov 2022 16:28:39 -0400 Subject: [PATCH 01/15] Squashed 'raltool/' content from commit 73b33d9deeb git-subtree-dir: raltool git-subtree-split: 73b33d9deebf77f64a9aa66c18d0f8b5653c5907 --- .github/bors.toml | 4 + .github/workflows/ci.yaml | 20 + .gitignore | 4 + .vscode/settings.json | 8 + Cargo.lock | 532 ++++++++++++++++++++++ Cargo.toml | 18 + LICENSE-APACHE | 201 ++++++++ LICENSE-MIT | 25 + README.md | 260 +++++++++++ build.rs | 51 +++ rust-toolchain.toml | 5 + src/generate/block.rs | 106 +++++ src/generate/common.rs | 86 ++++ src/generate/device.rs | 118 +++++ src/generate/enumm.rs | 47 ++ src/generate/fieldset.rs | 122 +++++ src/generate/mod.rs | 171 +++++++ src/ir.rs | 311 +++++++++++++ src/lib.rs | 5 + src/main.rs | 383 ++++++++++++++++ src/svd2ir.rs | 385 ++++++++++++++++ src/transform/common.rs | 279 ++++++++++++ src/transform/delete.rs | 80 ++++ src/transform/delete_enums.rs | 53 +++ src/transform/delete_fieldsets.rs | 67 +++ src/transform/expand_extends.rs | 82 ++++ src/transform/find_duplicate_enums.rs | 38 ++ src/transform/find_duplicate_fieldsets.rs | 38 ++ src/transform/make_block.rs | 78 ++++ src/transform/make_field_array.rs | 62 +++ src/transform/make_register_array.rs | 62 +++ src/transform/merge_blocks.rs | 66 +++ src/transform/merge_enums.rs | 57 +++ src/transform/merge_fieldsets.rs | 66 +++ src/transform/mod.rs | 262 +++++++++++ src/transform/modify_byte_offset.rs | 23 + src/transform/rename.rs | 29 ++ src/transform/rename_fields.rs | 27 ++ src/transform/rename_registers.rs | 27 ++ src/transform/sort.rs | 22 + src/util.rs | 316 +++++++++++++ 41 files changed, 4596 insertions(+) create mode 100644 .github/bors.toml create mode 100644 .github/workflows/ci.yaml create mode 100644 .gitignore create mode 100644 .vscode/settings.json create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 README.md create mode 100644 build.rs create mode 100644 rust-toolchain.toml create mode 100644 src/generate/block.rs create mode 100644 src/generate/common.rs create mode 100644 src/generate/device.rs create mode 100644 src/generate/enumm.rs create mode 100644 src/generate/fieldset.rs create mode 100644 src/generate/mod.rs create mode 100644 src/ir.rs create mode 100755 src/lib.rs create mode 100755 src/main.rs create mode 100644 src/svd2ir.rs create mode 100644 src/transform/common.rs create mode 100644 src/transform/delete.rs create mode 100644 src/transform/delete_enums.rs create mode 100644 src/transform/delete_fieldsets.rs create mode 100644 src/transform/expand_extends.rs create mode 100644 src/transform/find_duplicate_enums.rs create mode 100644 src/transform/find_duplicate_fieldsets.rs create mode 100644 src/transform/make_block.rs create mode 100644 src/transform/make_field_array.rs create mode 100644 src/transform/make_register_array.rs create mode 100644 src/transform/merge_blocks.rs create mode 100644 src/transform/merge_enums.rs create mode 100644 src/transform/merge_fieldsets.rs create mode 100644 src/transform/mod.rs create mode 100644 src/transform/modify_byte_offset.rs create mode 100644 src/transform/rename.rs create mode 100644 src/transform/rename_fields.rs create mode 100644 src/transform/rename_registers.rs create mode 100644 src/transform/sort.rs create mode 100644 src/util.rs diff --git a/.github/bors.toml b/.github/bors.toml new file mode 100644 index 
000000000000..27f77ded18f5 --- /dev/null +++ b/.github/bors.toml @@ -0,0 +1,4 @@ +status = [ + "build", +] +delete_merged_branches = true diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 000000000000..870ec9be64bc --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,20 @@ +name: ci +on: + push: + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Cache Dependencies + uses: Swatinem/rust-cache@v1.3.0 + + - name: Check + run: | + cargo check + \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000000..26a07695f3dc --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +[._]*.sw[a-p] +*.org +*.rs.bk +target \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000000..054f9f3bda87 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "rust-analyzer.cargo.runBuildScripts": true, + "rust-analyzer.procMacro.enable": true, + "rust-analyzer.experimental.procAttrMacros": false, + "rust-analyzer.assist.importGranularity": "module", + "rust-analyzer.assist.importEnforceGranularity": true, + "editor.formatOnSave": true +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 000000000000..8d6097d99bed --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,532 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chiptool" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "env_logger", + "inflections", + "log", + "proc-macro2", + "quote", + "regex", + "serde", + "serde_yaml", + "svd-parser", +] + +[[package]] +name = "clap" +version = "3.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c93436c21e4698bacadf42917db28b23017027a4deccb35dbe47a7e7840123" +dependencies = [ + "atty", + "bitflags", + "clap_derive", + "indexmap", + "lazy_static", + "os_str_bytes", + "strsim", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap_derive" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"da95d038ede1a964ce99f49cbe27a7fb538d1da595e4b4f70b8c8f338d17bf16" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdbfe11fe19ff083c48923cf179540e8cd0535903dc35e178a1fdeeb59aef51f" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +dependencies = [ + "cfg-if", + "lazy_static", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "env_logger" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "indexmap" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "inflections" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a257582fdcde896fd96463bf2d40eefea0580021c0712a0e2b028b60b47a837a" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" + +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "os_str_bytes" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +dependencies = [ + "memchr", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "lazy_static", + "num_cpus", +] + +[[package]] +name = "regex" +version = "1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + 
+[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "serde" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_yaml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +dependencies = [ + "indexmap", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "svd-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697e7645ad9f5311fe3d872d094b135627b1616aea9e1573dddd28ca522579b9" +dependencies = [ + "anyhow", + "once_cell", + "rayon", + "regex", + "thiserror", + "xmltree", +] + +[[package]] +name = "syn" +version = "1.0.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebd69e719f31e88618baa1eaa6ee2de5c9a1c004f1e9ecdb58e8352a13f20a01" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + 
+[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "xml-rs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c1cb601d29fe2c2ac60a2b2e5e293994d87a1f6fa9687a31a15270f909be9c2" +dependencies = [ + "bitflags", +] + +[[package]] +name = "xmltree" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff8eaee9d17062850f1e6163b509947969242990ee59a35801af437abe041e70" +dependencies = [ + "xml-rs", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000000..612bb3e742c3 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "chiptool" +license = "MIT OR Apache-2.0" +version = "0.1.0" +edition = "2021" + +[dependencies] +clap = { version = "3.1.6", features = ["derive"] } +env_logger = "0.9.0" +inflections = "1.1" +log = { version = "~0.4", features = ["std"] } +quote = "1.0" +proc-macro2 = "1.0" +anyhow = "1.0.19" +regex = "1.4.3" +serde = { version = "1.0.123", features = [ "derive" ]} +serde_yaml = "0.8.15" +svd-parser = { version = "0.10.2", features = ["derive-from"] } diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000000..a43445e6cd1b --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Jorge Aparicio + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 000000000000..d8dbf2b741e5 --- /dev/null +++ b/README.md @@ -0,0 +1,260 @@ +# chiptool + +`chiptool` is an experimental fork of `svd2rust` to experiment with: + +- Different API for the generated code. +- Integrating "transforms" in the generation process +- New workflow for storing register definitions in standalone YAML files. + +## Example + +Tested with the RP2040 SVD. Other SVDs might not work quite right yet. + +- svd: https://github.com/Dirbaio/svd2rust/blob/master/svd/rp2040.svd +- yaml: https://github.com/Dirbaio/svd2rust/blob/master/svd/rp2040.yaml +- repo: https://github.com/Dirbaio/rp2040-pac/settings +- docs: https://dirbaio.github.io/rp2040-pac/rp2040_pac/index.html + +## Changes from svd2rust main + +### No owned structs + +Original svd2rust generates an owned struct for each peripheral. This has turned out to have some severe downsides: + +1. there are many cases where the HAL wants to "split up" a peripheral into multiple owned parts. Examples: + - Many pins in a GPIO port peripheral. + - The RX and TX halfs of a UART peripheral. + - Different clocks/PLLs in a clock control peripheral. + - Channels/streams in a DMA controller + - PWM channels + + Virtually all existing HALs run into this issue, and have to unsafely bypass the ownership rules. [nrf gpio](https://github.com/nrf-rs/nrf-hal/blob/6fc5061509d5f3efaa2db15d4af7e3bced4a2e83/nrf-hal-common/src/gpio.rs#L135), [nrf i2c](https://github.com/nrf-rs/nrf-hal/blob/1d6e228f11b7df3847d33d66b01ff772501beb3c/nrf-hal-common/src/twi.rs#L28), [nrf ppi](https://github.com/nrf-rs/nrf-hal/blob/8a28455ab93eb47be4e4edb62ebe96939e1a7ebd/nrf-hal-common/src/ppi/mod.rs#L122), [stm32f4 gpio](https://github.com/stm32-rs/stm32f4xx-hal/blob/9b6aad4b3365a48ae652c315730ab47522e57cfb/src/gpio.rs#L302), [stm32f4 dma](https://github.com/stm32-rs/stm32f4xx-hal/blob/9b6aad4b3365a48ae652c315730ab47522e57cfb/src/dma/mod.rs#L359), [stm32f4 pwm](https://github.com/stm32-rs/stm32f4xx-hal/blob/bb214b6017d84a9c8dd2e8c9fd1f915141e167cc/src/pwm.rs#L228), [atsamd gpio](https://github.com/atsamd-rs/atsamd/blob/4816bb13a12a604e51f929d17b286071a0082c82/hal/src/common/gpio/v2/pin.rs#L669) ... + + Since HALs in practice always bypass the PAC ownership rules and create their own safe abstractions, there's not much advantage in having ownership rules in the PAC in the first place. Not having them makes HAL code cleaner. + +2. sometimes "ownership" is not so clear-cut: + - Multicore. Some peripherals are "core-local", they have an instance per core. Constant address, which instance you access depends on which core you're running on. For example Cortex-M core peripherals, and SIO in RP2040. + - Mutually-exclusive peripherals. In nRF you can only use one of (UART0, SPIM0, SPIS0, TWIM0, TWIS0) at the same time, one of (UART1, SPIM1, SPIS1, TWIM1, TWIS1) at the same time... They're the same peripheral in different "modes". Current nRF PACs get this wrong, allowing you to use e.g. SPIM0 and TWIM0 at the same time, which breaks. +3. Ownership in PACs means upgrading the PAC is ALWAYS a breaking change. 
+ + To guarantee you can't get two singletons for the same peripheral, PACs deliberately sabotage building a binary containing two PAC major versions (with this [no\_mangle thing](https://github.com/nrf-rs/nrf-pacs/blob/8f9da05ca1b496bd743f223ed1122dfe9220956c/pacs/nrf52840-pac/src/lib.rs#L2279-L2280)). + + This means that a HAL major-bumping its PAC dependency is itself a breaking change, so the HAL would have to be major-bumped as well. And all PAC bumps are breaking, and they're VERY common... + +### All register access is unsafe + +Reasons: + +- Since there are no owned structs, there can be data races when writing to a register from multiple contexts (e.g. main thread and interrupt). Ensuring there are no data races is left to the HALs (HALs are already doing this anyway, see above). +- DMA registers can be turned into arbitrary pointer dereferencing. +- Controls for low-level chip features such as RAM power control or clock control can break safety in interesting ways. + +### Structs representing register values (sets of fields) + +Current svd2rust provides "read proxy" and "write proxy" structs with methods to access register fields when reading/writing. However: + +- There's no type-safe way to save the _value_ of a register in a variable to write later (there's `.bits()`, but it's not type-safe). +- There's no way to read/modify register fields on a saved value (if using `.bits()`, the user has a raw u32 and has to extract the fields manually with bitwise manipulation). + +Solution: for each register with fields, a "fieldset" struct is generated. This struct wraps the raw `u32` and allows getting/setting individual fields. + +```rust +let mut val = pac::watchdog::fields::Tick(0); +val.set_cycles(XOSC_MHZ as u16); +val.set_enable(true); +info!("enabled: {:bool}", val.enable()); +``` + +On a register, `.read()` and `.write_value()` can get and set such fieldset values: + +```rust +let mut val = pac::WATCHDOG.tick().read(); +val.set_enable(false); +// We could save val in a variable somewhere else, +// then get it and write it back later. +pac::WATCHDOG.tick().write_value(val); +``` + +Closure-based `.write()` and `.modify()` are provided too, like in current svd2rust. + +```rust +pac::WATCHDOG.tick().write(|w| { + w.set_cycles(XOSC_MHZ as u16); + w.set_enable(true); +}); +``` + +### Structs representing enumerated values + +For each EnumeratedValues in a field, a struct is generated. + +This struct is _not_ a Rust enum; it is a struct with associated constants (a sketch of what such a generated struct looks like is shown below, after the sharing example). + +### Possibility to share items (blocks, fieldsets, enums) + +Many peripherals have multiple registers with the same fields (same names, same bit offsets). This tool allows the user to merge them via YAML config. Same for enums and register blocks. + +Fieldsets and enums can be shared across different registers, different register blocks, even different peripherals. + +Example: the RP2040 chip has two GPIO banks: `BANK0` and `QSPI`. These share many enums and field sets. Example of merging some: + +```yaml +- MergeEnums: + from: io_[^:]+::values::Gpio.+Ctrl(.+)over + to: io::values::${1}over +``` + +This merges all `INOVER`, `OUTOVER`, `OEOVER` and `IRQOVER` enums (144 enums!) into just 4. + +- Huge reduction in generated code, mitigating the long compile times that are one of the top complaints about current PACs. +- Better code sharing in HALs since they can use a single enum/fieldset to read/write to multiple registers.
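+
+As an illustration, a generated enumerated-value struct looks roughly like the sketch below, which follows the shape emitted by `src/generate/enumm.rs` later in this patch. The `Inover` name, its `u8` width and its variant names/values are made up for the example; the real ones come from the SVD.
+
+```rust
+/// Illustrative sketch of a generated enumerated-value struct.
+#[repr(transparent)]
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Inover(pub u8);
+
+impl Inover {
+    /// Pass the input through unchanged.
+    pub const NORMAL: Self = Self(0x0);
+    /// Invert the input.
+    pub const INVERT: Self = Self(0x1);
+    /// Force the input low.
+    pub const LOW: Self = Self(0x2);
+    /// Force the input high.
+    pub const HIGH: Self = Self(0x3);
+}
+```
+
+Because it is just a newtype over the raw integer, values read from hardware that don't match any named constant are still representable, and code matches against the associated constants instead of enum variants.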
+ +### Automatic cluster creation + +```yaml +- MakeBlock: + block: pio0::Pio0 + from: sm(\d+)_(.+) + to_outer: sm$1 + to_inner: $2 + to_block: pio0::StateMachine +``` + +This collapses all `smX_*` registers into a single cluster: + + // before: + RegisterBlock: + sm0_clkdiv + sm0_execctrl + sm0_shiftctrl + sm0_addr + sm0_instr + sm0_pinctrl + sm1_clkdiv + sm1_execctrl + sm1_shiftctrl + sm1_addr + sm1_instr + sm1_pinctrl + sm2_clkdiv + sm2_execctrl + sm2_shiftctrl + sm2_addr + sm2_instr + sm2_pinctrl + sm3_clkdiv + sm3_execctrl + sm3_shiftctrl + sm3_addr + sm3_instr + sm3_pinctrl + + // after: + RegisterBlock: + sm0 + sm1 + sm2 + sm3 + + StateMachine block: + clkdiv + execctrl + shiftctrl + addr + instr + pinctrl + +### Automatic array creation + +Example: + +```yaml +- MakeRegisterArray: + block: pio0::Pio0 + from: sm\d+ + to: sm +``` + + // before: + RegisterBlock: + sm0 + sm1 + sm2 + sm3 + + // after: + RegisterBlock: + sm (array of length 4) + +### RegisterBlocks and Registers wrap pointers + +```rust +// a RegisterBlock +pub struct Resets(*mut u8); + +impl Resets { + // A register access function. This is just pointer arithmetic. + pub fn reset_done(self) -> Reg<fields::ResetDone, RW> { + unsafe { Reg::new(self.0.add(8usize)) } + } +} + +// the Reg struct +pub struct Reg<T, A> { + ptr: *mut u8, + ... +} +``` + +- No need to calculate and fill padding holes in RegisterBlock structs. +- No problem if registers overlap (currently svd2rust has to check for this, and falls back to a function-based codegen similar to this one). +- Pointer provenance is not erased. The previous codegen turns pointers into references (&), so it's undefined behavior to do arithmetic with a register pointer to write somewhere else. Keeping the pointer is useful in a few niche situations: + - Calculating a pointer to a particular register bit in the bitbanding region + - The RP2040 chip has register aliases that atomically set/clear/xor register bits at addr + 0x1000/0x2000/0x3000 + +This generates the same assembly code as original svd2rust when optimizations are enabled. + +## Running + + mkdir -p out + mkdir -p out/src + cargo run -- -i svd/rp2040.svd -c svd/rp2040.yaml + rustfmt out/src/lib.rs + (cd out; cargo build && cargo doc) + +## To-Do + +Missing features: + +- Clusters in input SVD file +- Registers with bit widths other than 32 + +Nice-to-have features: + +- More transforms (deletes, renames, move entire module...) +- Better clean-up of doc comments + +## License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the +work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. + +## Code of Conduct + +Contribution to this crate is organized under the terms of the [Rust Code of +Conduct][coc]; the maintainer of this crate, the [Tools team][team], promises +to intervene to uphold that code of conduct.
+ + [coc]: CODE_OF_CONDUCT.md +[team]: https://github.com/rust-embedded/wg#the-tools-team diff --git a/build.rs b/build.rs new file mode 100644 index 000000000000..56ee3e0a19a6 --- /dev/null +++ b/build.rs @@ -0,0 +1,51 @@ +use std::env; +use std::error::Error; +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; +use std::process::Command; + +struct IgnoredError {} + +impl<E> From<E> for IgnoredError +where + E: Error, +{ + fn from(_: E) -> IgnoredError { + IgnoredError {} + } +} + +fn main() { + let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + + File::create(out_dir.join("commit-info.txt")) + .unwrap() + .write_all(commit_info().as_bytes()) + .unwrap(); +} + +fn commit_info() -> String { + match (commit_hash(), commit_date()) { + (Ok(hash), Ok(date)) => format!(" ({} {})", hash.trim(), date.trim()), + _ => String::new(), + } +} + +fn commit_hash() -> Result<String, IgnoredError> { + Ok(String::from_utf8( + Command::new("git") + .args(&["rev-parse", "--short", "HEAD"]) + .output()? + .stdout, + )?) +} + +fn commit_date() -> Result<String, IgnoredError> { + Ok(String::from_utf8( + Command::new("git") + .args(&["log", "-1", "--date=short", "--pretty=format:%cd"]) + .output()? + .stdout, + )?) +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 000000000000..06d6aa18d7e9 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,5 @@ +# Before upgrading check that everything is available on all tier1 targets here: +# https://rust-lang.github.io/rustup-components-history +[toolchain] +channel = "nightly-2022-03-10" +components = [ "rust-src", "rustfmt" ] diff --git a/src/generate/block.rs b/src/generate/block.rs new file mode 100644 index 000000000000..115104f41246 --- /dev/null +++ b/src/generate/block.rs @@ -0,0 +1,106 @@ +use anyhow::Result; +use proc_macro2::TokenStream; +use proc_macro2::{Ident, Span}; +use quote::quote; + +use crate::ir::*; +use crate::util; + +pub fn render(opts: &super::Options, ir: &IR, b: &Block, path: &str) -> Result<TokenStream> { + let common_path = opts.common_path(); + + let span = Span::call_site(); + let mut items = TokenStream::new(); + + for i in &b.items { + let name = Ident::new(&i.name, span); + let offset = i.byte_offset as usize; + + let doc = util::doc(&i.description); + + match &i.inner { + BlockItemInner::Register(r) => { + let reg_ty = if let Some(fieldset_path) = &r.fieldset { + let _f = ir.fieldsets.get(fieldset_path).unwrap(); + util::relative_path(fieldset_path, path) + } else { + match r.bit_size { + 8 => quote!(u8), + 16 => quote!(u16), + 32 => quote!(u32), + 64 => quote!(u64), + _ => panic!("Invalid register bit size {}", r.bit_size), + } + }; + + let access = match r.access { + Access::Read => quote!(#common_path::R), + Access::Write => quote!(#common_path::W), + Access::ReadWrite => quote!(#common_path::RW), + }; + + let ty = quote!(#common_path::Reg<#reg_ty, #access>); + if let Some(array) = &i.array { + let (len, offs_expr) = super::process_array(array); + items.extend(quote!( + #doc + #[inline(always)] + pub fn #name(self, n: usize) -> #ty { + assert!(n < #len); + unsafe { #common_path::Reg::from_ptr(self.0.add(#offset + #offs_expr)) } + } + )); + } else { + items.extend(quote!( + #doc + #[inline(always)] + pub fn #name(self) -> #ty { + unsafe { #common_path::Reg::from_ptr(self.0.add(#offset)) } + } + )); + } + } + BlockItemInner::Block(b) => { + let block_path = &b.block; + let _b2 = ir.blocks.get(block_path).unwrap(); + let ty = util::relative_path(block_path, path); + if let Some(array) = &i.array { + let (len, offs_expr) =
super::process_array(array); + + items.extend(quote!( + #doc + #[inline(always)] + pub fn #name(self, n: usize) -> #ty { + assert!(n < #len); + unsafe { #ty(self.0.add(#offset + #offs_expr)) } + } + )); + } else { + items.extend(quote!( + #doc + #[inline(always)] + pub fn #name(self) -> #ty { + unsafe { #ty(self.0.add(#offset)) } + } + )); + } + } + } + } + + let (_, name) = super::split_path(path); + let name = Ident::new(name, span); + let doc = util::doc(&b.description); + let out = quote! { + #doc + #[derive(Copy, Clone, Eq, PartialEq)] + pub struct #name (pub *mut u8); + unsafe impl Send for #name {} + unsafe impl Sync for #name {} + impl #name { + #items + } + }; + + Ok(out) +} diff --git a/src/generate/common.rs b/src/generate/common.rs new file mode 100644 index 000000000000..1b4282d98146 --- /dev/null +++ b/src/generate/common.rs @@ -0,0 +1,86 @@ +use core::marker::PhantomData; + +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct RW; +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct R; +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct W; + +mod sealed { + use super::*; + pub trait Access {} + impl Access for R {} + impl Access for W {} + impl Access for RW {} +} + +pub trait Access: sealed::Access + Copy {} +impl Access for R {} +impl Access for W {} +impl Access for RW {} + +pub trait Read: Access {} +impl Read for RW {} +impl Read for R {} + +pub trait Write: Access {} +impl Write for RW {} +impl Write for W {} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct Reg<T: Copy, A: Access> { + ptr: *mut u8, + phantom: PhantomData<*mut (T, A)>, +} +unsafe impl<T: Copy, A: Access> Send for Reg<T, A> {} +unsafe impl<T: Copy, A: Access> Sync for Reg<T, A> {} + +impl<T: Copy, A: Access> Reg<T, A> { + #[inline(always)] + pub fn from_ptr(ptr: *mut u8) -> Self { + Self { + ptr, + phantom: PhantomData, + } + } + + #[inline(always)] + pub fn ptr(&self) -> *mut T { + self.ptr as _ + } +} + +impl<T: Copy, A: Read> Reg<T, A> { + #[inline(always)] + pub unsafe fn read(&self) -> T { + (self.ptr as *mut T).read_volatile() + } +} + +impl<T: Copy, A: Write> Reg<T, A> { + #[inline(always)] + pub unsafe fn write_value(&self, val: T) { + (self.ptr as *mut T).write_volatile(val) + } +} + +impl<T: Default + Copy, A: Write> Reg<T, A> { + #[inline(always)] + pub unsafe fn write<R>(&self, f: impl FnOnce(&mut T) -> R) -> R { + let mut val = Default::default(); + let res = f(&mut val); + self.write_value(val); + res + } +} + +impl<T: Copy, A: Read + Write> Reg<T, A> { + #[inline(always)] + pub unsafe fn modify<R>(&self, f: impl FnOnce(&mut T) -> R) -> R { + let mut val = self.read(); + let res = f(&mut val); + self.write_value(val); + res + } +} diff --git a/src/generate/device.rs b/src/generate/device.rs new file mode 100644 index 000000000000..1428a8b1427f --- /dev/null +++ b/src/generate/device.rs @@ -0,0 +1,118 @@ +use anyhow::Result; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::ir::*; +use crate::util::{self, ToSanitizedUpperCase}; + +pub fn render(_opts: &super::Options, ir: &IR, d: &Device, path: &str) -> Result<TokenStream> { + let mut out = TokenStream::new(); + let span = Span::call_site(); + + let mut interrupts_sorted = d.interrupts.clone(); + interrupts_sorted.sort_by_key(|i| i.value); + + let mut interrupts = TokenStream::new(); + let mut peripherals = TokenStream::new(); + let mut vectors = TokenStream::new(); + let mut names = vec![]; + + let mut pos = 0; + for i in &interrupts_sorted { + while pos < i.value { + vectors.extend(quote!(Vector { _reserved: 0 },)); + pos += 1; + } + pos += 1; + + let name_uc = Ident::new(&i.name.to_sanitized_upper_case(), span); + let description = format!( + "{} - {}", + i.value, + i.description + .as_ref() + .map(|s| util::respace(s)) + .as_ref() + 
.map(|s| util::escape_brackets(s)) + .unwrap_or_else(|| i.name.clone()) + ); + + let value = util::unsuffixed(i.value as u64); + + interrupts.extend(quote! { + #[doc = #description] + #name_uc = #value, + }); + vectors.extend(quote!(Vector { _handler: #name_uc },)); + names.push(name_uc); + } + + for p in &d.peripherals { + let name = Ident::new(&p.name, span); + let address = util::hex(p.base_address as u64); + let doc = util::doc(&p.description); + + if let Some(block_name) = &p.block { + let _b = ir.blocks.get(block_name); + let path = util::relative_path(block_name, path); + + peripherals.extend(quote! { + #doc + pub const #name: #path = #path(#address as u32 as _); + }); + } else { + peripherals.extend(quote! { + #doc + pub const #name: *mut () = #address as u32 as _; + }); + } + } + + let n = util::unsuffixed(pos as u64); + out.extend(quote!( + #[derive(Copy, Clone, Debug, PartialEq, Eq)] + pub enum Interrupt { + #interrupts + } + + unsafe impl cortex_m::interrupt::InterruptNumber for Interrupt { + #[inline(always)] + fn number(self) -> u16 { + self as u16 + } + } + + #[cfg(feature = "rt")] + mod _vectors { + extern "C" { + #(fn #names();)* + } + + pub union Vector { + _handler: unsafe extern "C" fn(), + _reserved: u32, + } + + #[link_section = ".vector_table.interrupts"] + #[no_mangle] + pub static __INTERRUPTS: [Vector; #n] = [ + #vectors + ]; + } + + #peripherals + )); + + /* + if let Some(cpu) = d.cpu.as_ref() { + let bits = util::unsuffixed(u64::from(cpu.nvic_priority_bits)); + + out.extend(quote! { + ///Number available in the NVIC for configuring priority + pub const NVIC_PRIO_BITS: u8 = #bits; + }); + } + */ + + Ok(out) +} diff --git a/src/generate/enumm.rs b/src/generate/enumm.rs new file mode 100644 index 000000000000..e51bb6f110e9 --- /dev/null +++ b/src/generate/enumm.rs @@ -0,0 +1,47 @@ +use anyhow::Result; +use proc_macro2::TokenStream; +use proc_macro2::{Ident, Span}; +use quote::quote; + +use crate::ir::*; +use crate::util; + +pub fn render(_opts: &super::Options, _ir: &IR, e: &Enum, path: &str) -> Result { + let span = Span::call_site(); + let mut items = TokenStream::new(); + + let ty = match e.bit_size { + 1..=8 => quote!(u8), + 9..=16 => quote!(u16), + 17..=32 => quote!(u32), + 33..=64 => quote!(u64), + _ => panic!("Invalid bit_size {}", e.bit_size), + }; + + for f in &e.variants { + let name = Ident::new(&f.name, span); + let value = util::hex(f.value); + let doc = util::doc(&f.description); + items.extend(quote!( + #doc + pub const #name: Self = Self(#value); + )); + } + + let (_, name) = super::split_path(path); + let name = Ident::new(name, span); + let doc = util::doc(&e.description); + + let out = quote! 
{ + #doc + #[repr(transparent)] + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] + pub struct #name (pub #ty); + + impl #name { + #items + } + }; + + Ok(out) +} diff --git a/src/generate/fieldset.rs b/src/generate/fieldset.rs new file mode 100644 index 000000000000..262eab8b9023 --- /dev/null +++ b/src/generate/fieldset.rs @@ -0,0 +1,122 @@ +use anyhow::Result; +use proc_macro2::TokenStream; +use proc_macro2::{Ident, Span}; +use quote::quote; + +use crate::ir::*; +use crate::util; + +pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Result { + let span = Span::call_site(); + let mut items = TokenStream::new(); + + let ty = match fs.bit_size { + 1..=8 => quote!(u8), + 9..=16 => quote!(u16), + 17..=32 => quote!(u32), + 33..=64 => quote!(u64), + _ => panic!("Invalid bit_size {}", fs.bit_size), + }; + + for f in &fs.fields { + let name = Ident::new(&f.name, span); + let name_set = Ident::new(&format!("set_{}", f.name), span); + let bit_offset = f.bit_offset as usize; + let _bit_size = f.bit_size as usize; + let mask = util::hex(1u64.wrapping_shl(f.bit_size).wrapping_sub(1)); + let doc = util::doc(&f.description); + let field_ty: TokenStream; + let to_bits: TokenStream; + let from_bits: TokenStream; + + if let Some(e_path) = &f.enum_readwrite { + let e = ir.enums.get(e_path).unwrap(); + + let enum_ty = match e.bit_size { + 1..=8 => quote!(u8), + 9..=16 => quote!(u16), + 17..=32 => quote!(u32), + 33..=64 => quote!(u64), + _ => panic!("Invalid bit_size {}", e.bit_size), + }; + + field_ty = util::relative_path(e_path, path); + to_bits = quote!(val.0 as #ty); + from_bits = quote!(#field_ty(val as #enum_ty)); + } else { + field_ty = match f.bit_size { + 1 => quote!(bool), + 2..=8 => quote!(u8), + 9..=16 => quote!(u16), + 17..=32 => quote!(u32), + 33..=64 => quote!(u64), + _ => panic!("Invalid bit_size {}", f.bit_size), + }; + to_bits = quote!(val as #ty); + from_bits = if f.bit_size == 1 { + quote!(val != 0) + } else { + quote!(val as #field_ty) + } + } + + if let Some(array) = &f.array { + let (len, offs_expr) = super::process_array(array); + items.extend(quote!( + #doc + #[inline(always)] + pub fn #name(&self, n: usize) -> #field_ty{ + assert!(n < #len); + let offs = #bit_offset + #offs_expr; + let val = (self.0 >> offs) & #mask; + #from_bits + } + #doc + #[inline(always)] + pub fn #name_set(&mut self, n: usize, val: #field_ty) { + assert!(n < #len); + let offs = #bit_offset + #offs_expr; + self.0 = (self.0 & !(#mask << offs)) | (((#to_bits) & #mask) << offs); + } + )); + } else { + items.extend(quote!( + #doc + #[inline(always)] + pub const fn #name(&self) -> #field_ty{ + let val = (self.0 >> #bit_offset) & #mask; + #from_bits + } + #doc + #[inline(always)] + pub fn #name_set(&mut self, val: #field_ty) { + self.0 = (self.0 & !(#mask << #bit_offset)) | (((#to_bits) & #mask) << #bit_offset); + } + )); + } + } + + let (_, name) = super::split_path(path); + let name = Ident::new(name, span); + let doc = util::doc(&fs.description); + + let out = quote! 
{ + #doc + #[repr(transparent)] + #[derive(Copy, Clone, Eq, PartialEq)] + pub struct #name (pub #ty); + + impl #name { + #items + } + + impl Default for #name { + #[inline(always)] + fn default() -> #name { + #name(0) + } + } + }; + + Ok(out) +} diff --git a/src/generate/mod.rs b/src/generate/mod.rs new file mode 100644 index 000000000000..227283ff4a99 --- /dev/null +++ b/src/generate/mod.rs @@ -0,0 +1,171 @@ +mod block; +mod device; +mod enumm; +mod fieldset; + +use anyhow::Result; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; +use std::collections::HashMap; +use std::str::FromStr; + +use crate::ir::*; + +pub const COMMON_MODULE: &[u8] = include_bytes!("common.rs"); + +struct Module { + items: TokenStream, + children: HashMap, +} + +impl Module { + fn new() -> Self { + Self { + // Default mod contents + items: quote!(), + children: HashMap::new(), + } + } + + fn get_by_path(&mut self, path: &[&str]) -> &mut Module { + if path.is_empty() { + return self; + } + + self.children + .entry(path[0].to_owned()) + .or_insert_with(Module::new) + .get_by_path(&path[1..]) + } + + fn render(self) -> Result { + let span = Span::call_site(); + + let mut res = TokenStream::new(); + res.extend(self.items); + + for (name, module) in self.children.into_iter() { + let name = Ident::new(&name, span); + let contents = module.render()?; + res.extend(quote! { + pub mod #name { + #contents + } + }); + } + Ok(res) + } +} + +pub enum CommonModule { + Builtin, + External(TokenStream), +} + +pub struct Options { + pub common_module: CommonModule, +} + +impl Options { + fn common_path(&self) -> TokenStream { + match &self.common_module { + CommonModule::Builtin => TokenStream::from_str("crate::common").unwrap(), + CommonModule::External(path) => path.clone(), + } + } +} + +pub fn render(ir: &IR, opts: &Options) -> Result { + let mut root = Module::new(); + root.items = TokenStream::new(); // Remove default contents + + let commit_info = { + let tmp = include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")); + + if tmp.is_empty() { + " (untracked)" + } else { + tmp + } + }; + + let doc = format!( + "Peripheral access API (generated using chiptool v{}{})", + env!("CARGO_PKG_VERSION"), + commit_info + ); + + root.items.extend(quote!( + #![no_std] + #![doc=#doc] + )); + + for (p, d) in ir.devices.iter() { + let (mods, _) = split_path(p); + root.get_by_path(&mods) + .items + .extend(device::render(opts, ir, d, p)?); + } + + for (p, b) in ir.blocks.iter() { + let (mods, _) = split_path(p); + root.get_by_path(&mods) + .items + .extend(block::render(opts, ir, b, p)?); + } + + for (p, fs) in ir.fieldsets.iter() { + let (mods, _) = split_path(p); + root.get_by_path(&mods) + .items + .extend(fieldset::render(opts, ir, fs, p)?); + } + + for (p, e) in ir.enums.iter() { + let (mods, _) = split_path(p); + root.get_by_path(&mods) + .items + .extend(enumm::render(opts, ir, e, p)?); + } + + match &opts.common_module { + CommonModule::Builtin => { + let tokens = + TokenStream::from_str(std::str::from_utf8(COMMON_MODULE).unwrap()).unwrap(); + + let module = root.get_by_path(&["common"]); + module.items = TokenStream::new(); // Remove default contents + module.items.extend(tokens); + } + CommonModule::External(_) => {} + } + + root.render() +} + +fn split_path(s: &str) -> (Vec<&str>, &str) { + let mut v: Vec<&str> = s.split("::").collect(); + let n = v.pop().unwrap(); + (v, n) +} + +fn process_array(array: &Array) -> (usize, TokenStream) { + match array { + Array::Regular(array) => { + let len = array.len as usize; + 
let stride = array.stride as usize; + let offs_expr = quote!(n*#stride); + (len, offs_expr) + } + Array::Cursed(array) => { + let len = array.offsets.len(); + let offsets = array + .offsets + .iter() + .map(|&x| x as usize) + .collect::>(); + let offs_expr = quote!(([#(#offsets),*][n] as usize)); + (len, offs_expr) + } + } +} diff --git a/src/ir.rs b/src/ir.rs new file mode 100644 index 000000000000..62045d2710e6 --- /dev/null +++ b/src/ir.rs @@ -0,0 +1,311 @@ +use de::MapAccess; +use serde::{de, de::Visitor, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; +use std::collections::{BTreeMap, HashMap}; +use std::fmt; + +#[derive(Clone, Debug, PartialEq)] +pub struct IR { + pub devices: HashMap, + pub blocks: HashMap, + pub fieldsets: HashMap, + pub enums: HashMap, +} + +impl IR { + pub fn new() -> Self { + Self { + devices: HashMap::new(), + blocks: HashMap::new(), + fieldsets: HashMap::new(), + enums: HashMap::new(), + } + } + + pub fn merge(&mut self, other: IR) { + self.devices.extend(other.devices); + self.blocks.extend(other.blocks); + self.fieldsets.extend(other.fieldsets); + self.enums.extend(other.enums); + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Device { + pub peripherals: Vec, + pub interrupts: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Peripheral { + pub name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + pub base_address: u64, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub array: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub block: Option, + + #[serde( + default, + skip_serializing_if = "HashMap::is_empty", + serialize_with = "ordered_map" + )] + pub interrupts: HashMap, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Interrupt { + pub name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + pub value: u32, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Block { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extends: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + pub items: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockItem { + pub name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub array: Option, + pub byte_offset: u32, + + #[serde(flatten)] + pub inner: BlockItemInner, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum BlockItemInner { + Block(BlockItemBlock), + Register(Register), +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Array { + Regular(RegularArray), + Cursed(CursedArray), +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RegularArray { + pub len: u32, + pub stride: u32, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct CursedArray { + pub offsets: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Register { + #[serde(default = "default_readwrite", skip_serializing_if = "is_readwrite")] + pub access: Access, + #[serde(default = "default_32", skip_serializing_if = "is_32")] + pub bit_size: u32, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub 
fieldset: Option, +} +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockItemBlock { + pub block: String, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum Access { + ReadWrite, + Read, + Write, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct FieldSet { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extends: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(default = "default_32", skip_serializing_if = "is_32")] + pub bit_size: u32, + pub fields: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Field { + pub name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + + pub bit_offset: u32, + pub bit_size: u32, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub array: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enum_read: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enum_write: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "enum")] + pub enum_readwrite: Option, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Enum { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + pub bit_size: u32, + pub variants: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct EnumVariant { + pub name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + pub value: u64, +} + +fn default_32() -> u32 { + 32 +} +fn is_32(x: &u32) -> bool { + *x == 32 +} + +fn default_readwrite() -> Access { + Access::ReadWrite +} +fn is_readwrite(x: &Access) -> bool { + *x == Access::ReadWrite +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +enum Kind { + Block, + Fieldset, + Enum, +} + +impl Serialize for IR { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Sort by block/fieldset/enum, then alphabetically. + // This ensures the output's order is deterministic. + // - Easier diffing between yamls + // - No spurious changes when roundtripping + let mut entries = Vec::new(); + for name in self.blocks.keys() { + entries.push((Kind::Block, name)); + } + for name in self.fieldsets.keys() { + entries.push((Kind::Fieldset, name)); + } + for name in self.enums.keys() { + entries.push((Kind::Enum, name)); + } + + entries.sort(); + + let mut map = serializer.serialize_map(Some(entries.len()))?; + for (kind, name) in entries { + match kind { + Kind::Block => { + map.serialize_entry( + &format!("block/{}", name), + self.blocks.get(name).unwrap(), + )?; + } + Kind::Fieldset => { + map.serialize_entry( + &format!("fieldset/{}", name), + self.fieldsets.get(name).unwrap(), + )?; + } + Kind::Enum => { + map.serialize_entry(&format!("enum/{}", name), self.enums.get(name).unwrap())?; + } + } + } + map.end() + } +} + +struct IRVisitor; + +impl<'de> Visitor<'de> for IRVisitor { + type Value = IR; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an IR") + } + + fn visit_map(self, mut access: M) -> Result + where + M: MapAccess<'de>, + { + let mut ir = IR::new(); + + // While there are entries remaining in the input, add them + // into our map. + while let Some(key) = access.next_key()? 
{ + let key: String = key; + let (kind, name) = key.split_once("/").ok_or(de::Error::custom("item names must be in form `kind/name`, where kind is `block`, `fieldset` or `enum`"))?; + match kind { + "block" => { + let val: Block = access.next_value()?; + if ir.blocks.insert(name.to_string(), val).is_some() { + return Err(de::Error::custom(format!("Duplicate item {:?}", key))); + } + } + "fieldset" => { + let val: FieldSet = access.next_value()?; + if ir.fieldsets.insert(name.to_string(), val).is_some() { + return Err(de::Error::custom(format!("Duplicate item {:?}", key))); + } + } + "enum" => { + let val: Enum = access.next_value()?; + if ir.enums.insert(name.to_string(), val).is_some() { + return Err(de::Error::custom(format!("Duplicate item {:?}", key))); + } + } + _ => return Err(de::Error::custom(format!("Unknown kind {:?}", kind))), + } + } + + Ok(ir) + } +} + +impl<'de> Deserialize<'de> for IR { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_map(IRVisitor) + } +} + +fn ordered_map(value: &HashMap, serializer: S) -> Result +where + S: Serializer, +{ + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100755 index 000000000000..f7aff2166151 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,5 @@ +pub mod generate; +pub mod ir; +pub mod svd2ir; +pub mod transform; +pub mod util; diff --git a/src/main.rs b/src/main.rs new file mode 100755 index 000000000000..0b35c0846b49 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,383 @@ +#![recursion_limit = "128"] + +use anyhow::{bail, Context, Result}; +use chiptool::{generate, svd2ir}; +use clap::Parser; +use log::*; +use regex::Regex; +use std::fs; +use std::io::Read; +use std::{fs::File, io::stdout}; + +use chiptool::ir::IR; + +#[derive(Parser)] +#[clap(version = "1.0", author = "Dirbaio ")] +struct Opts { + #[clap(subcommand)] + subcommand: Subcommand, +} + +#[derive(Parser)] +enum Subcommand { + Generate(Generate), + ExtractPeripheral(ExtractPeripheral), + Transform(Transform), + Fmt(Fmt), + Check(Check), + GenBlock(GenBlock), +} + +/// Extract peripheral from SVD to YAML +#[derive(Parser)] +struct ExtractPeripheral { + /// SVD file path + #[clap(long)] + svd: String, + /// Peripheral from the SVD + #[clap(long)] + peripheral: String, + /// Transforms file path + #[clap(long)] + transform: Option, +} + +/// Apply transform to YAML +#[derive(Parser)] +struct Transform { + /// Input YAML path + #[clap(short, long)] + input: String, + /// Output YAML path + #[clap(short, long)] + output: String, + /// Transforms file path + #[clap(short, long)] + transform: String, +} + +/// Generate a PAC directly from a SVD +#[derive(Parser)] +struct Generate { + /// SVD file path + #[clap(long)] + svd: String, + /// Transforms file path + #[clap(long)] + transform: Option, +} + +/// Reformat a YAML +#[derive(Parser)] +struct Fmt { + /// Peripheral file path + files: Vec, + /// Error if incorrectly formatted, instead of fixing. + #[clap(long)] + check: bool, +} + +/// Check a YAML for errors. 
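// The subcommands declared here map onto invocations like the following (a rough sketch,
// assuming the binary is built as `chiptool` and that clap's default kebab-case naming
// applies to subcommand names; the file and peripheral names are placeholders):
//
//   chiptool extract-peripheral --svd device.svd --peripheral UART0 > uart0.yaml
//   chiptool transform -i uart0.yaml -o uart0_clean.yaml -t transforms.yaml
//   chiptool generate --svd device.svd --transform transforms.yaml
//   chiptool fmt --check uart0.yaml
//
// extract-peripheral writes the YAML to stdout, hence the redirect above; generate writes
// the rendered PAC to lib.rs in the current directory.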
+#[derive(Parser)] +struct Check { + /// Peripheral file path + files: Vec, +} + +/// Generate Rust code from a YAML register block +#[derive(Parser)] +struct GenBlock { + /// Input YAML path + #[clap(short, long)] + input: String, + /// Output YAML path + #[clap(short, long)] + output: String, +} + +fn main() -> Result<()> { + env_logger::init(); + + let opts: Opts = Opts::parse(); + + match opts.subcommand { + Subcommand::ExtractPeripheral(x) => extract_peripheral(x), + Subcommand::Generate(x) => gen(x), + Subcommand::Transform(x) => transform(x), + Subcommand::Fmt(x) => fmt(x), + Subcommand::Check(x) => check(x), + Subcommand::GenBlock(x) => gen_block(x), + } +} + +fn load_svd(path: &str) -> Result { + let xml = &mut String::new(); + File::open(path) + .context("Cannot open the SVD file")? + .read_to_string(xml) + .context("Cannot read the SVD file")?; + + let device = svd_parser::parse(xml)?; + Ok(device) +} + +fn load_config(path: &str) -> Result { + let config = fs::read(path).context("Cannot read the config file")?; + serde_yaml::from_slice(&config).context("cannot deserialize config") +} + +fn extract_peripheral(args: ExtractPeripheral) -> Result<()> { + let config = match args.transform { + Some(s) => load_config(&s)?, + None => Config::default(), + }; + + let svd = load_svd(&args.svd)?; + let mut ir = IR::new(); + + let peri = args.peripheral; + let mut p = svd + .peripherals + .iter() + .find(|p| p.name == peri) + .expect("peripheral not found"); + + if let Some(f) = &p.derived_from { + p = svd + .peripherals + .iter() + .find(|p| p.name == *f) + .expect("derivedFrom peripheral not found"); + } + + chiptool::svd2ir::convert_peripheral(&mut ir, p)?; + + // Fix weird newline spam in descriptions. + let re = Regex::new("[ \n]+").unwrap(); + chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; + + for t in &config.transforms { + info!("running: {:?}", t); + t.run(&mut ir)?; + } + + // Ensure consistent sort order in the YAML. + chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + + serde_yaml::to_writer(stdout(), &ir).unwrap(); + Ok(()) +} + +fn gen(args: Generate) -> Result<()> { + let config = match args.transform { + Some(s) => load_config(&s)?, + None => Config::default(), + }; + + let svd = load_svd(&args.svd)?; + let mut ir = svd2ir::convert_svd(&svd)?; + + // Fix weird newline spam in descriptions. + let re = Regex::new("[ \n]+").unwrap(); + chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; + + for t in &config.transforms { + info!("running: {:?}", t); + t.run(&mut ir)?; + } + + let generate_opts = generate::Options { + common_module: generate::CommonModule::Builtin, + }; + let items = generate::render(&ir, &generate_opts).unwrap(); + fs::write("lib.rs", items.to_string())?; + + Ok(()) +} + +fn transform(args: Transform) -> Result<()> { + let data = fs::read(&args.input)?; + let mut ir: IR = serde_yaml::from_slice(&data)?; + let config = load_config(&args.transform)?; + for t in &config.transforms { + info!("running: {:?}", t); + t.run(&mut ir)?; + } + let data = serde_yaml::to_vec(&ir)?; + fs::write(&args.output, data)?; + + Ok(()) +} + +fn fmt(args: Fmt) -> Result<()> { + for file in args.files { + let got_data = fs::read(&file)?; + let mut ir: IR = serde_yaml::from_slice(&got_data)?; + + // Ensure consistent sort order in the YAML. 
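// fmt normalizes a register YAML in place: parse it into the IR, sort blocks/fields/variants,
// trim descriptions, then re-serialize and compare the bytes against what was read. With
// --check a difference is an error; otherwise the file is rewritten. The files being
// formatted are the "kind/name"-keyed maps produced by extract-peripheral, shaped roughly
// like this (an illustrative sketch; the concrete names and values are invented):
//
//   block/UART:
//     items:
//     - name: CR
//       byte_offset: 0
//       fieldset: CR
//   fieldset/CR:
//     fields:
//     - name: EN
//       bit_offset: 0
//       bit_size: 1
//   enum/EN:
//     bit_size: 1
//     variants:
//     - name: DISABLED
//       value: 0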
+ chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + + // Trim all descriptions + + let cleanup = |s: &mut Option| { + if let Some(s) = s.as_mut() { + *s = s.trim().to_string() + } + }; + + for (_, b) in &mut ir.blocks { + cleanup(&mut b.description); + for i in &mut b.items { + cleanup(&mut i.description); + } + } + + for (_, b) in &mut ir.fieldsets { + cleanup(&mut b.description); + for i in &mut b.fields { + cleanup(&mut i.description); + } + } + + for (_, b) in &mut ir.enums { + cleanup(&mut b.description); + for i in &mut b.variants { + cleanup(&mut i.description); + } + } + + let want_data = serde_yaml::to_vec(&ir)?; + + if got_data != want_data { + if args.check { + bail!("File {} is not correctly formatted", &file); + } else { + fs::write(&file, want_data)?; + } + } + } + Ok(()) +} + +fn check(args: Check) -> Result<()> { + for file in args.files { + let got_data = fs::read(&file)?; + let ir: IR = serde_yaml::from_slice(&got_data)?; + + let mut printed = false; + let mut error = move |s: String| { + if !printed { + printed = true; + println!("{}:", &file); + } + println!(" {}", s); + }; + + for (name, b) in &ir.blocks { + for (i1, i2) in Pairs::new(b.items.iter()) { + if i1.byte_offset == i2.byte_offset { + error(format!( + "block {}: registers overlap: {} {}", + name, i1.name, i2.name + )); + } + } + } + + for (name, e) in &ir.enums { + for (i1, i2) in Pairs::new(e.variants.iter()) { + if i1.value == i2.value { + error(format!( + "enum {}: variants with same value: {} {}", + name, i1.name, i2.name + )); + } + } + } + + for (name, f) in &ir.fieldsets { + for (i1, i2) in Pairs::new(f.fields.iter()) { + if i2.bit_offset + i2.bit_size > i1.bit_offset + && i1.bit_offset + i1.bit_size > i2.bit_offset + { + error(format!( + "fieldset {}: fields overlap: {} {}", + name, i1.name, i2.name + )); + } + } + } + } + Ok(()) +} + +fn gen_block(args: GenBlock) -> Result<()> { + let data = fs::read(&args.input)?; + let mut ir: IR = serde_yaml::from_slice(&data)?; + + chiptool::transform::Sanitize {}.run(&mut ir).unwrap(); + + // Ensure consistent sort order in the YAML. 
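// gen_block is the YAML-to-Rust half of the pipeline: read an IR YAML, sanitize names into
// Rust conventions (snake_case register and field names, PascalCase type names, upper-case
// enum variants), sort for deterministic output, then render the IR with the built-in
// `common` module and write the resulting token stream to the output path.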
+ chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + + let generate_opts = generate::Options { + common_module: generate::CommonModule::Builtin, + }; + let items = generate::render(&ir, &generate_opts).unwrap(); + fs::write(&args.output, items.to_string())?; + + Ok(()) +} +#[derive(serde::Serialize, serde::Deserialize)] +struct Config { + transforms: Vec, +} + +impl Default for Config { + fn default() -> Self { + Self { transforms: vec![] } + } +} + +// ============== + +struct Pairs { + head: Option, + tail: U, + next: U, +} + +impl Pairs { + fn new(mut iter: U) -> Self { + let head = iter.next(); + Pairs { + head, + tail: iter.clone(), + next: iter, + } + } +} + +impl Iterator for Pairs +where + U::Item: Clone, +{ + type Item = (U::Item, U::Item); + + fn next(&mut self) -> Option { + let a = self.head.as_ref()?.clone(); + + if let Some(b) = self.tail.next() { + return Some((a, b)); + } + + match self.next.next() { + Some(new_head) => { + self.head = Some(new_head); + self.tail = self.next.clone(); + self.next() + } + None => None, + } + } +} diff --git a/src/svd2ir.rs b/src/svd2ir.rs new file mode 100644 index 000000000000..6cc3ea2402bf --- /dev/null +++ b/src/svd2ir.rs @@ -0,0 +1,385 @@ +use log::*; +use std::collections::HashMap; +use svd_parser as svd; + +use crate::util; +use crate::{ir::*, transform}; + +struct ProtoBlock { + name: Vec, + description: Option, + registers: Vec, +} + +struct ProtoFieldset { + name: Vec, + description: Option, + bit_size: u32, + fields: Vec, +} + +struct ProtoEnum { + name: Vec, + usage: Option, + bit_size: u32, + variants: Vec, +} + +pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<()> { + let mut blocks = Vec::new(); + collect_blocks( + &mut blocks, + vec![p.name.clone()], + p.description.clone(), + p.registers.as_ref().unwrap(), + ); + + let mut fieldsets: Vec = Vec::new(); + let mut enums: Vec = Vec::new(); + + for block in &blocks { + for r in &block.registers { + if let svd::RegisterCluster::Register(r) = r { + if r.derived_from.is_some() { + continue; + } + + if let Some(fields) = &r.fields { + let mut fieldset_name = block.name.clone(); + fieldset_name.push(util::replace_suffix(&r.name, "")); + fieldsets.push(ProtoFieldset { + name: fieldset_name.clone(), + description: r.description.clone(), + bit_size: 32, // todo + fields: fields.clone(), + }); + + for f in fields { + if f.derived_from.is_some() { + continue; + } + + let field_name = f.name.clone(); + + for e in &f.enumerated_values { + if e.derived_from.is_some() { + continue; + } + + let mut enum_name = fieldset_name.clone(); + enum_name.push(e.name.clone().unwrap_or_else(|| field_name.clone())); + info!("adding enum {:?}", enum_name); + + enums.push(ProtoEnum { + name: enum_name, + usage: e.usage, + bit_size: f.bit_range.width, + variants: e.values.clone(), + }); + } + } + }; + } + } + } + + // Make all collected names unique by prefixing with parents' names if needed. 
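// unique_names (defined at the bottom of this file) maps every collected hierarchical name
// to its shortest suffix that no other entry ends with, joined with '_'. For example, if
// the SVD yields the cluster paths ["TIMER"], ["TIMER", "CH"] and ["PWM", "CH"], the
// resulting identifiers are "TIMER", "TIMER_CH" and "PWM_CH": "CH" on its own would be
// ambiguous, so both clusters keep their parent prefix.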
+ let block_names = unique_names(blocks.iter().map(|x| x.name.clone()).collect()); + let fieldset_names = unique_names(fieldsets.iter().map(|x| x.name.clone()).collect()); + let enum_names = unique_names(enums.iter().map(|x| x.name.clone()).collect()); + + // Convert blocks + for proto in &blocks { + let mut block = Block { + extends: None, + description: proto.description.clone(), + items: Vec::new(), + }; + + for r in &proto.registers { + match r { + svd::RegisterCluster::Register(r) => { + if r.derived_from.is_some() { + warn!("unsupported derived_from in registers"); + continue; + } + + let fieldset_name = if r.fields.is_some() { + let mut fieldset_name = proto.name.clone(); + fieldset_name.push(util::replace_suffix(&r.name, "")); + Some(fieldset_names.get(&fieldset_name).unwrap().clone()) + } else { + None + }; + + let array = if let svd::Register::Array(_, dim) = r { + Some(Array::Regular(RegularArray { + len: dim.dim, + stride: dim.dim_increment, + })) + } else { + None + }; + + let access = match r.access { + None => Access::ReadWrite, + Some(svd::Access::ReadOnly) => Access::Read, + Some(svd::Access::WriteOnly) => Access::Write, + Some(svd::Access::WriteOnce) => Access::Write, + Some(svd::Access::ReadWrite) => Access::ReadWrite, + Some(svd::Access::ReadWriteOnce) => Access::ReadWrite, + }; + + let block_item = BlockItem { + name: util::replace_suffix(&r.name, ""), + description: r.description.clone(), + array, + byte_offset: r.address_offset, + inner: BlockItemInner::Register(Register { + access, // todo + bit_size: r.size.unwrap_or(32), + fieldset: fieldset_name.clone(), + }), + }; + + block.items.push(block_item) + } + svd::RegisterCluster::Cluster(c) => { + if c.derived_from.is_some() { + warn!("unsupported derived_from in clusters"); + continue; + } + + let cname = util::replace_suffix(&c.name, ""); + + let array = if let svd::Cluster::Array(_, dim) = c { + Some(Array::Regular(RegularArray { + len: dim.dim, + stride: dim.dim_increment, + })) + } else { + None + }; + + let mut block_name = proto.name.clone(); + block_name.push(util::replace_suffix(&c.name, "")); + let block_name = block_names.get(&block_name).unwrap().clone(); + + block.items.push(BlockItem { + name: cname.clone(), + description: c.description.clone(), + array, + byte_offset: c.address_offset, + inner: BlockItemInner::Block(BlockItemBlock { block: block_name }), + }); + } + } + } + + let block_name = block_names.get(&proto.name).unwrap().clone(); + assert!(ir.blocks.insert(block_name, block).is_none()) + } + + // Convert fieldsets + for proto in &fieldsets { + let mut fieldset = FieldSet { + extends: None, + description: proto.description.clone(), + bit_size: proto.bit_size, + fields: Vec::new(), + }; + + for f in &proto.fields { + if f.derived_from.is_some() { + warn!("unsupported derived_from in fieldset"); + } + + let mut field = Field { + name: f.name.clone(), + description: f.description.clone(), + bit_offset: f.bit_range.offset, + bit_size: f.bit_range.width, + array: None, + enum_read: None, + enum_write: None, + enum_readwrite: None, + }; + + for e in &f.enumerated_values { + let mut enum_name = proto.name.clone(); + enum_name.push( + e.derived_from + .clone() + .or_else(|| e.name.clone()) + .unwrap_or_else(|| f.name.clone()), + ); + info!("finding enum {:?}", enum_name); + let enumm = enums.iter().find(|e| e.name == enum_name).unwrap(); + let enum_name = enum_names.get(&enum_name).unwrap().clone(); + info!("found {:?}", enum_name); + + let usage = enumm.usage.unwrap_or(svd::Usage::ReadWrite); + + match 
usage { + svd::Usage::Read => field.enum_read = Some(enum_name.clone()), + svd::Usage::Write => field.enum_write = Some(enum_name.clone()), + svd::Usage::ReadWrite => field.enum_readwrite = Some(enum_name.clone()), + } + } + + fieldset.fields.push(field) + } + + let fieldset_name = fieldset_names.get(&proto.name).unwrap().clone(); + assert!(ir.fieldsets.insert(fieldset_name, fieldset).is_none()) + } + + for proto in &enums { + let variants = proto + .variants + .iter() + .map(|v| EnumVariant { + description: v.description.clone(), + name: v.name.clone(), + value: v.value.unwrap() as _, // TODO what are variants without values used for?? + }) + .collect(); + + let enumm = Enum { + description: None, + bit_size: proto.bit_size, + variants, + }; + + let enum_name = enum_names.get(&proto.name).unwrap().clone(); + assert!(ir.enums.insert(enum_name.clone(), enumm).is_none()); + } + + Ok(()) +} + +pub fn convert_svd(svd: &svd::Device) -> anyhow::Result { + let mut ir = IR::new(); + + let mut device = Device { + peripherals: vec![], + interrupts: vec![], + }; + + for p in &svd.peripherals { + let block_name = p.derived_from.as_ref().unwrap_or(&p.name); + let block_name = format!("{}::{}", block_name, block_name); + let periname = p.name.to_ascii_uppercase(); + + let peri = Peripheral { + name: periname.clone(), + description: p.description.clone(), + base_address: p.base_address, + block: Some(block_name), + array: None, + interrupts: HashMap::new(), + }; + + let mut irqs: Vec<&svd::Interrupt> = vec![]; + for i in &p.interrupt { + if !irqs.iter().any(|&j| j.name == i.name) { + irqs.push(i) + } + } + irqs.sort_by_key(|i| &i.name); + + for (_n, &i) in irqs.iter().enumerate() { + let iname = i.name.to_ascii_uppercase(); + + if !device.interrupts.iter().any(|j| j.name == iname) { + device.interrupts.push(Interrupt { + name: iname.clone(), + description: i.description.clone(), + value: i.value, + }); + } + + /* + let name = if iname.len() > periname.len() && iname.starts_with(&periname) { + let s = iname.strip_prefix(&periname).unwrap(); + s.trim_matches('_').to_string() + } else if irqs.len() == 1 { + "IRQ".to_string() + } else { + format!("IRQ{}", n) + }; + + peri.interrupts.insert(name, iname.clone()); + */ + } + + device.peripherals.push(peri); + + if p.derived_from.is_none() { + let mut pir = IR::new(); + convert_peripheral(&mut pir, p)?; + + let path = &p.name; + transform::map_names(&mut pir, |k, s| match k { + transform::NameKind::Block => *s = format!("{}::{}", path, s), + transform::NameKind::Fieldset => *s = format!("{}::regs::{}", path, s), + transform::NameKind::Enum => *s = format!("{}::vals::{}", path, s), + _ => {} + }); + + ir.merge(pir); + } + } + + ir.devices.insert("".to_string(), device); + + transform::sort::Sort {}.run(&mut ir).unwrap(); + transform::Sanitize {}.run(&mut ir).unwrap(); + + Ok(ir) +} + +fn collect_blocks( + out: &mut Vec, + block_name: Vec, + description: Option, + registers: &[svd::RegisterCluster], +) { + out.push(ProtoBlock { + name: block_name.clone(), + description, + registers: registers.to_owned(), + }); + + for r in registers { + if let svd::RegisterCluster::Cluster(c) = r { + if c.derived_from.is_some() { + continue; + } + + let mut block_name = block_name.clone(); + block_name.push(util::replace_suffix(&c.name, "")); + collect_blocks(out, block_name, c.description.clone(), &c.children); + } + } +} + +fn unique_names(names: Vec>) -> HashMap, String> { + let mut res = HashMap::new(); + + let suffix_exists = |n: &[String], i: usize| { + names + .iter() + 
.enumerate() + .filter(|(j, _)| *j != i) + .any(|(_, n2)| n2.ends_with(n)) + }; + for (i, n) in names.iter().enumerate() { + let j = (0..n.len()) + .rev() + .find(|&j| !suffix_exists(&n[j..], i)) + .unwrap(); + assert!(res.insert(n.clone(), n[j..].join("_")).is_none()); + } + res +} diff --git a/src/transform/common.rs b/src/transform/common.rs new file mode 100644 index 000000000000..29bc4e0fe9eb --- /dev/null +++ b/src/transform/common.rs @@ -0,0 +1,279 @@ +use anyhow::bail; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; + +use crate::ir::*; + +pub(crate) fn make_regex(r: &str) -> Result { + regex::Regex::new(&format!("^{}$", r)) +} + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Serialize, Deserialize)] +pub enum CheckLevel { + NoCheck, + Layout, + Names, + Descriptions, +} + +pub(crate) fn check_mergeable_enums(a: &Enum, b: &Enum, level: CheckLevel) -> anyhow::Result<()> { + if let Err(e) = check_mergeable_enums_inner(a, b, level) { + bail!( + "Cannot merge enums.\nfirst: {:#?}\nsecond: {:#?}\ncause: {:?}", + a, + b, + e + ) + } + Ok(()) +} +pub(crate) fn check_mergeable_enums_inner( + a: &Enum, + b: &Enum, + level: CheckLevel, +) -> anyhow::Result<()> { + if a.bit_size != b.bit_size { + bail!("Different bit size: {} vs {}", a.bit_size, b.bit_size) + } + + if level >= CheckLevel::Layout { + if a.variants.len() != b.variants.len() { + bail!("Different variant count") + } + + let mut aok = [false; 128]; + let mut bok = [false; 128]; + + for (ia, fa) in a.variants.iter().enumerate() { + if let Some((ib, _fb)) = b + .variants + .iter() + .enumerate() + .find(|(ib, fb)| !bok[*ib] && mergeable_variants(fa, fb, level)) + { + aok[ia] = true; + bok[ib] = true; + } else { + bail!("Variant in first enum has no match: {:?}", fa); + } + } + } + + Ok(()) +} + +pub(crate) fn mergeable_variants(a: &EnumVariant, b: &EnumVariant, level: CheckLevel) -> bool { + let mut res = true; + if level >= CheckLevel::Layout { + res &= a.value == b.value; + } + if level >= CheckLevel::Names { + res &= a.name == b.name; + } + if level >= CheckLevel::Descriptions { + res &= a.description == b.description; + } + res +} + +impl Default for CheckLevel { + fn default() -> Self { + Self::Names + } +} + +pub(crate) fn check_mergeable_fieldsets( + a: &FieldSet, + b: &FieldSet, + level: CheckLevel, +) -> anyhow::Result<()> { + if let Err(e) = check_mergeable_fieldsets_inner(a, b, level) { + bail!( + "Cannot merge fieldsets.\nfirst: {:#?}\nsecond: {:#?}\ncause: {:?}", + a, + b, + e + ) + } + Ok(()) +} + +pub(crate) fn mergeable_fields(a: &Field, b: &Field, level: CheckLevel) -> bool { + let mut res = true; + if level >= CheckLevel::Layout { + res &= a.bit_size == b.bit_size + && a.bit_offset == b.bit_offset + && a.enum_read == b.enum_read + && a.enum_write == b.enum_write + && a.enum_readwrite == b.enum_readwrite + && a.array == b.array; + } + if level >= CheckLevel::Names { + res &= a.name == b.name; + } + if level >= CheckLevel::Descriptions { + res &= a.description == b.description; + } + res +} + +pub(crate) fn check_mergeable_fieldsets_inner( + a: &FieldSet, + b: &FieldSet, + level: CheckLevel, +) -> anyhow::Result<()> { + if a.bit_size != b.bit_size { + bail!("Different bit size: {} vs {}", a.bit_size, b.bit_size) + } + + if level >= CheckLevel::Layout { + if a.fields.len() != b.fields.len() { + bail!("Different field count") + } + + let mut aok = [false; 128]; + let mut bok = [false; 128]; + + for (ia, fa) in a.fields.iter().enumerate() { + if let Some((ib, _fb)) = b + 
.fields + .iter() + .enumerate() + .find(|(ib, fb)| !bok[*ib] && mergeable_fields(fa, fb, level)) + { + aok[ia] = true; + bok[ib] = true; + } else { + bail!("Field in first fieldset has no match: {:?}", fa); + } + } + } + + Ok(()) +} + +pub(crate) fn match_all(set: impl Iterator, re: ®ex::Regex) -> HashSet { + let mut ids: HashSet = HashSet::new(); + for id in set { + if re.is_match(&id) { + ids.insert(id); + } + } + ids +} + +pub(crate) fn match_groups( + set: impl Iterator, + re: ®ex::Regex, + to: &str, +) -> HashMap> { + let mut groups: HashMap> = HashMap::new(); + for s in set { + if let Some(to) = match_expand(&s, re, to) { + if let Some(v) = groups.get_mut(&to) { + v.insert(s); + } else { + let mut v = HashSet::new(); + v.insert(s); + groups.insert(to, v); + } + } + } + groups +} + +pub(crate) fn match_expand(s: &str, regex: ®ex::Regex, res: &str) -> Option { + let m = regex.captures(s)?; + let mut dst = String::new(); + m.expand(res, &mut dst); + Some(dst) +} + +pub(crate) fn replace_enum_ids(ir: &mut IR, from: &HashSet, to: String) { + for (_, fs) in ir.fieldsets.iter_mut() { + for f in fs.fields.iter_mut() { + for id in [&mut f.enum_read, &mut f.enum_write, &mut f.enum_readwrite] + .into_iter() + .flatten() + { + if from.contains(id) { + *id = to.clone() + } + } + } + } +} + +pub(crate) fn replace_fieldset_ids(ir: &mut IR, from: &HashSet, to: String) { + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + if let BlockItemInner::Register(r) = &mut i.inner { + if let Some(id) = &r.fieldset { + if from.contains(id) { + r.fieldset = Some(to.clone()) + } + } + } + } + } +} + +pub(crate) fn replace_block_ids(ir: &mut IR, from: &HashSet, to: String) { + for (_, d) in ir.devices.iter_mut() { + for p in d.peripherals.iter_mut() { + if let Some(block) = &mut p.block { + if from.contains(block) { + *block = to.clone() + } + } + } + } + + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + if let BlockItemInner::Block(bi) = &mut i.inner { + if from.contains(&bi.block) { + bi.block = to.clone() + } + } + } + } +} + +pub(crate) fn calc_array(mut offsets: Vec) -> (u32, Array) { + offsets.sort_unstable(); + + // Guess stride. + let start_offset = offsets[0]; + let len = offsets.len() as u32; + let stride = if len == 1 { + // If there's only 1 item, we can't know the stride, but it + // doesn't really matter! 
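// (With a single offset the code below degenerates into a one-element Regular array with
// stride 0.) The general strategy: take the gap between the first two sorted offsets as the
// stride guess; if every offset equals start + n * stride the array is Regular, otherwise it
// becomes Cursed with the offsets stored relative to the first one. For example, byte
// offsets [0x10, 0x14, 0x18] give (0x10, Regular { len: 3, stride: 4 }), while
// [0x10, 0x14, 0x20] give (0x10, Cursed { offsets: [0x0, 0x4, 0x10] }).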
+ 0 + } else { + offsets[1] - offsets[0] + }; + + // Check the stride guess is OK + + if offsets + .iter() + .enumerate() + .all(|(n, &i)| i == start_offset + (n as u32) * stride) + { + // Array is regular, + ( + start_offset, + Array::Regular(RegularArray { + len: offsets.len() as _, + stride, + }), + ) + } else { + // Array is irregular, + for o in &mut offsets { + *o -= start_offset + } + (start_offset, Array::Cursed(CursedArray { offsets })) + } +} diff --git a/src/transform/delete.rs b/src/transform/delete.rs new file mode 100644 index 000000000000..f4fde5d24a1f --- /dev/null +++ b/src/transform/delete.rs @@ -0,0 +1,80 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Delete { + pub from: String, +} + +impl Delete { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + + let mut ids: HashSet = HashSet::new(); + for (id, _fs) in ir.fieldsets.iter() { + if re.is_match(id) { + info!("deleting fieldset {}", id); + ids.insert(id.clone()); + } + } + + super::delete_fieldsets::remove_fieldset_ids(ir, &ids); + + for id in ids { + ir.fieldsets.remove(&id); + } + + let mut ids: HashSet = HashSet::new(); + for (id, _e) in ir.enums.iter() { + if re.is_match(id) { + info!("deleting enum {}", id); + ids.insert(id.clone()); + } + } + + super::delete_enums::remove_enum_ids(ir, &ids); + + for id in ids { + ir.enums.remove(&id); + } + + let mut ids: HashSet = HashSet::new(); + for (id, _b) in ir.blocks.iter() { + if re.is_match(id) { + info!("deleting block {}", id); + ids.insert(id.clone()); + } + } + + remove_block_ids(ir, &ids); + + for id in ids { + ir.blocks.remove(&id); + } + + Ok(()) + } +} + +pub(crate) fn remove_block_ids(ir: &mut IR, from: &HashSet) { + for (_, b) in ir.blocks.iter_mut() { + b.items.retain(|i| { + if let BlockItemInner::Block(bi) = &i.inner { + !from.contains(&bi.block) + } else { + true + } + }); + } + + for (_, d) in ir.devices.iter_mut() { + d.peripherals.retain(|p| match &p.block { + Some(block) => !from.contains(block), + None => true, + }); + } +} diff --git a/src/transform/delete_enums.rs b/src/transform/delete_enums.rs new file mode 100644 index 000000000000..3db1906c0f6a --- /dev/null +++ b/src/transform/delete_enums.rs @@ -0,0 +1,53 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct DeleteEnums { + pub from: String, + pub bit_size: Option, + #[serde(default)] + pub soft: bool, +} + +impl DeleteEnums { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + + let mut ids: HashSet = HashSet::new(); + for (id, e) in ir.enums.iter() { + let bit_size_matches = self.bit_size.map_or(true, |s| s == e.bit_size); + if re.is_match(id) && bit_size_matches { + info!("deleting enum {}", id); + ids.insert(id.clone()); + } + } + + remove_enum_ids(ir, &ids); + + if !self.soft { + for id in ids { + ir.enums.remove(&id); + } + } + + Ok(()) + } +} + +pub(crate) fn remove_enum_ids(ir: &mut IR, from: &HashSet) { + for (_, fs) in ir.fieldsets.iter_mut() { + for f in fs.fields.iter_mut() { + for e in [&mut f.enum_read, &mut f.enum_write, &mut f.enum_readwrite].into_iter() { + if let Some(id) = e { + if from.contains(id) { + *e = None + } + } + } + } + } +} diff --git a/src/transform/delete_fieldsets.rs b/src/transform/delete_fieldsets.rs new 
file mode 100644 index 000000000000..477e39460650 --- /dev/null +++ b/src/transform/delete_fieldsets.rs @@ -0,0 +1,67 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct DeleteFieldsets { + pub from: String, + #[serde(default)] + pub useless: bool, + #[serde(default)] + pub soft: bool, +} + +impl DeleteFieldsets { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + + let mut ids: HashSet = HashSet::new(); + for (id, fs) in ir.fieldsets.iter() { + if re.is_match(id) && (!self.useless | is_useless(fs)) { + info!("deleting fieldset {}", id); + ids.insert(id.clone()); + } + } + + remove_fieldset_ids(ir, &ids); + + if !self.soft { + for id in ids { + ir.fieldsets.remove(&id); + } + } + + Ok(()) + } +} + +fn is_useless(fs: &FieldSet) -> bool { + match &fs.fields[..] { + [] => true, + [f] => { + fs.bit_size == f.bit_size + && f.bit_offset == 0 + && f.enum_read.is_none() + && f.enum_write.is_none() + && f.enum_readwrite.is_none() + } + _ => false, + } +} + +pub(crate) fn remove_fieldset_ids(ir: &mut IR, from: &HashSet) { + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + if let BlockItemInner::Register(reg) = &mut i.inner { + if let Some(id) = ®.fieldset { + if from.contains(id) { + reg.fieldset = None + } + } + } + } + } +} diff --git a/src/transform/expand_extends.rs b/src/transform/expand_extends.rs new file mode 100644 index 000000000000..137a3afa54af --- /dev/null +++ b/src/transform/expand_extends.rs @@ -0,0 +1,82 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; + +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct ExpandExtends {} + +impl ExpandExtends { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + // Expand blocks + let deps = ir + .blocks + .iter() + .map(|(k, v)| (k.clone(), v.extends.clone())) + .collect(); + for name in topological_sort(deps) { + let block = ir.blocks.get(&name).unwrap(); + if let Some(parent_name) = &block.extends { + let parent = ir.blocks.get(parent_name).unwrap(); + + let items = parent.items.clone(); + let block = ir.blocks.get_mut(&name).unwrap(); + + for i in items { + if !block.items.iter().any(|j| j.name == i.name) { + block.items.push(i); + } + } + } + } + // Expand fiedsets + let deps = ir + .fieldsets + .iter() + .map(|(k, v)| (k.clone(), v.extends.clone())) + .collect(); + for name in topological_sort(deps) { + let fieldset = ir.fieldsets.get(&name).unwrap(); + if let Some(parent_name) = &fieldset.extends { + let parent = ir.fieldsets.get(parent_name).unwrap(); + + let items = parent.fields.clone(); + let fieldset = ir.fieldsets.get_mut(&name).unwrap(); + + for i in items { + if !fieldset.fields.iter().any(|j| j.name == i.name) { + fieldset.fields.push(i); + } + } + } + } + + Ok(()) + } +} + +fn topological_sort(vals: HashMap>) -> Vec { + for (name, dep) in &vals { + info!("{:?} => {:?}", name, dep); + } + + let mut done = HashSet::new(); + let mut res = Vec::new(); + while done.len() != vals.len() { + for (name, dep) in &vals { + if done.contains(name) { + continue; + } + if let Some(dep) = dep { + if !done.contains(dep) { + continue; + } + } + info!("doing {:?} ", name); + done.insert(name.clone()); + res.push(name.clone()); + } + } + res +} diff --git a/src/transform/find_duplicate_enums.rs b/src/transform/find_duplicate_enums.rs new file mode 100644 index 
000000000000..a16979d037c5 --- /dev/null +++ b/src/transform/find_duplicate_enums.rs @@ -0,0 +1,38 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct FindDuplicateEnums {} +impl FindDuplicateEnums { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let mut suggested = HashSet::new(); + + for (id1, e1) in ir.enums.iter() { + if suggested.contains(&id1) { + continue; + } + + let mut ids = Vec::new(); + for (id2, e2) in ir.enums.iter() { + if id1 != id2 && mergeable_enums(e1, e2) { + ids.push(id2) + } + } + + if !ids.is_empty() { + ids.push(id1); + info!("Duplicated enums:"); + for id in ids { + suggested.insert(id); + info!(" {}", ir.enums.get(id).path); + } + } + } + + Ok(()) + } +} diff --git a/src/transform/find_duplicate_fieldsets.rs b/src/transform/find_duplicate_fieldsets.rs new file mode 100644 index 000000000000..4d0c2118dacf --- /dev/null +++ b/src/transform/find_duplicate_fieldsets.rs @@ -0,0 +1,38 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct FindDuplicateFieldsets {} +impl FindDuplicateFieldsets { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let mut suggested = HashSet::new(); + + for (id1, fs1) in ir.fieldsets.iter() { + if suggested.contains(&id1) { + continue; + } + + let mut ids = Vec::new(); + for (id2, fs2) in ir.fieldsets.iter() { + if id1 != id2 && check_mergeable_fieldsets(fs1, fs2, CheckLevel::Names).is_ok() { + ids.push(id2) + } + } + + if !ids.is_empty() { + ids.push(id1); + info!("Duplicated fieldsets:"); + for id in ids { + suggested.insert(id); + info!(" {}", ir.fieldsets.get(id).path); + } + } + } + + Ok(()) + } +} diff --git a/src/transform/make_block.rs b/src/transform/make_block.rs new file mode 100644 index 000000000000..f97cb35cc9e1 --- /dev/null +++ b/src/transform/make_block.rs @@ -0,0 +1,78 @@ +use log::*; +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MakeBlock { + pub blocks: String, + pub from: String, + pub to_outer: String, + pub to_block: String, + pub to_inner: String, +} + +impl MakeBlock { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let path_re = make_regex(&self.blocks)?; + let re = make_regex(&self.from)?; + for id in match_all(ir.blocks.keys().cloned(), &path_re) { + let b = ir.blocks.get_mut(&id).unwrap(); + let groups = match_groups(b.items.iter().map(|f| f.name.clone()), &re, &self.to_outer); + for (to, group) in groups { + let b = ir.blocks.get_mut(&id).unwrap(); + info!("blockifizing to {}", to); + + // Grab all items into a vec + let mut items = Vec::new(); + for i in b.items.iter().filter(|i| group.contains(&i.name)) { + items.push(i); + } + + // Sort by offs + items.sort_by_key(|i| i.byte_offset); + for i in &items { + info!(" {}", i.name); + } + + // todo check they're mergeable + // todo check they're not arrays (arrays of arrays not supported) + + let byte_offset = items[0].byte_offset; + + let b2 = Block { + extends: None, + description: None, + items: items + .iter() + .map(|&i| { + let mut i = i.clone(); + i.name = match_expand(&i.name, &re, &self.to_inner).unwrap(); + i.byte_offset -= byte_offset; + i + }) + .collect(), + }; + + // TODO if destination block exists, check mergeable + let dest = 
self.to_block.clone(); // todo regex + ir.blocks.insert(dest.clone(), b2); + + // Remove all items + let b = ir.blocks.get_mut(&id).unwrap(); + b.items.retain(|i| !group.contains(&i.name)); + + // Create the new block item + b.items.push(BlockItem { + name: to, + description: None, + array: None, + byte_offset, + inner: BlockItemInner::Block(BlockItemBlock { block: dest }), + }); + } + } + Ok(()) + } +} diff --git a/src/transform/make_field_array.rs b/src/transform/make_field_array.rs new file mode 100644 index 000000000000..ec9d7df29aeb --- /dev/null +++ b/src/transform/make_field_array.rs @@ -0,0 +1,62 @@ +use log::*; +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MakeFieldArray { + pub fieldsets: String, + pub from: String, + pub to: String, + #[serde(default)] + pub allow_cursed: bool, +} + +impl MakeFieldArray { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let path_re = make_regex(&self.fieldsets)?; + let re = make_regex(&self.from)?; + for id in match_all(ir.fieldsets.keys().cloned(), &path_re) { + let b = ir.fieldsets.get_mut(&id).unwrap(); + let groups = match_groups(b.fields.iter().map(|f| f.name.clone()), &re, &self.to); + for (to, group) in groups { + info!("arrayizing to {}", to); + + // Grab all items into a vec + let mut items = Vec::new(); + for i in b.fields.iter().filter(|i| group.contains(&i.name)) { + items.push(i); + } + + // todo check they're mergeable + // todo check they're not arrays (arrays of arrays not supported) + + // Sort by offs + items.sort_by_key(|i| i.bit_offset); + for i in &items { + info!(" {}", i.name); + } + + let (offset, array) = calc_array(items.iter().map(|x| x.bit_offset).collect()); + if let Array::Cursed(_) = &array { + if !self.allow_cursed { + panic!("arrayize: items are not evenly spaced. 
Set `allow_cursed: true` to allow this.") + } + } + + let mut item = items[0].clone(); + + // Remove all + b.fields.retain(|i| !group.contains(&i.name)); + + // Create the new array item + item.name = to; + item.array = Some(array); + item.bit_offset = offset; + b.fields.push(item); + } + } + Ok(()) + } +} diff --git a/src/transform/make_register_array.rs b/src/transform/make_register_array.rs new file mode 100644 index 000000000000..b6f1feaa6ef6 --- /dev/null +++ b/src/transform/make_register_array.rs @@ -0,0 +1,62 @@ +use log::*; +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MakeRegisterArray { + pub blocks: String, + pub from: String, + pub to: String, + #[serde(default)] + pub allow_cursed: bool, +} + +impl MakeRegisterArray { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let path_re = make_regex(&self.blocks)?; + let re = make_regex(&self.from)?; + for id in match_all(ir.blocks.keys().cloned(), &path_re) { + let b = ir.blocks.get_mut(&id).unwrap(); + let groups = match_groups(b.items.iter().map(|f| f.name.clone()), &re, &self.to); + for (to, group) in groups { + info!("arrayizing to {}", to); + + // Grab all items into a vec + let mut items = Vec::new(); + for i in b.items.iter().filter(|i| group.contains(&i.name)) { + items.push(i); + } + + // todo check they're mergeable + // todo check they're not arrays (arrays of arrays not supported) + + // Sort by offs + items.sort_by_key(|i| i.byte_offset); + for i in &items { + info!(" {}", i.name); + } + + let (offset, array) = calc_array(items.iter().map(|x| x.byte_offset).collect()); + if let Array::Cursed(_) = &array { + if !self.allow_cursed { + panic!("arrayize: items are not evenly spaced. Set `allow_cursed: true` to allow this.") + } + } + + let mut item = items[0].clone(); + + // Remove all + b.items.retain(|i| !group.contains(&i.name)); + + // Create the new array item + item.name = to; + item.array = Some(array); + item.byte_offset = offset; + b.items.push(item); + } + } + Ok(()) + } +} diff --git a/src/transform/merge_blocks.rs b/src/transform/merge_blocks.rs new file mode 100644 index 000000000000..f59bbd01bec0 --- /dev/null +++ b/src/transform/merge_blocks.rs @@ -0,0 +1,66 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MergeBlocks { + pub from: String, + pub to: String, + pub main: Option, + #[serde(default)] + pub check: CheckLevel, +} + +impl MergeBlocks { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + let groups = match_groups(ir.blocks.keys().cloned(), &re, &self.to); + + for (to, group) in groups { + info!("Merging blocks, dest: {}", to); + for id in &group { + info!(" {}", id); + } + self.merge_blocks(ir, group, to, self.main.as_ref())?; + } + + Ok(()) + } + + fn merge_blocks( + &self, + ir: &mut IR, + ids: HashSet, + to: String, + main: Option<&String>, + ) -> anyhow::Result<()> { + let mut main_id = ids.iter().next().unwrap().clone(); + if let Some(main) = main { + let re = make_regex(main)?; + for id in ids.iter() { + if re.is_match(id) { + main_id = id.clone(); + break; + } + } + } + let b = ir.blocks.get(&main_id).unwrap().clone(); + + // todo + //for id in &ids { + // let b2 = ir.blocks.get(id).unwrap(); + // check_mergeable_blocks(&b, b2, self.check)?; + //} + + replace_block_ids(ir, &ids, to.clone()); + for id in &ids { + 
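// Every block in the matched group is dropped here; the representative copy `b` taken
// above (the first match, or the one selected by `main`) is re-inserted below under the
// merged name, after replace_block_ids has already re-pointed all peripherals and nested
// block items at `to`.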
ir.blocks.remove(id); + } + ir.blocks.insert(to, b); + + Ok(()) + } +} diff --git a/src/transform/merge_enums.rs b/src/transform/merge_enums.rs new file mode 100644 index 000000000000..bb0bcaecf912 --- /dev/null +++ b/src/transform/merge_enums.rs @@ -0,0 +1,57 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MergeEnums { + pub from: String, + pub to: String, + #[serde(default)] + pub check: CheckLevel, + #[serde(default)] + pub skip_unmergeable: bool, +} + +impl MergeEnums { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + let groups = match_groups(ir.enums.keys().cloned(), &re, &self.to); + + for (to, group) in groups { + info!("Merging enums, dest: {}", to); + for id in &group { + info!(" {}", id); + } + self.merge_enums(ir, group, to)?; + } + + Ok(()) + } + + fn merge_enums(&self, ir: &mut IR, ids: HashSet, to: String) -> anyhow::Result<()> { + let e = ir.enums.get(ids.iter().next().unwrap()).unwrap().clone(); + + for id in &ids { + let e2 = ir.enums.get(id).unwrap(); + if let Err(e) = check_mergeable_enums(&e, e2, self.check) { + if self.skip_unmergeable { + info!("skipping: {:?}", to); + return Ok(()); + } else { + return Err(e); + } + } + } + for id in &ids { + ir.enums.remove(id); + } + + assert!(ir.enums.insert(to.clone(), e).is_none()); + replace_enum_ids(ir, &ids, to); + + Ok(()) + } +} diff --git a/src/transform/merge_fieldsets.rs b/src/transform/merge_fieldsets.rs new file mode 100644 index 000000000000..6e1cbfa10669 --- /dev/null +++ b/src/transform/merge_fieldsets.rs @@ -0,0 +1,66 @@ +use log::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct MergeFieldsets { + pub from: String, + pub to: String, + pub main: Option, + #[serde(default)] + pub check: CheckLevel, +} + +impl MergeFieldsets { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + let groups = match_groups(ir.fieldsets.keys().cloned(), &re, &self.to); + + for (to, group) in groups { + info!("Merging fieldsets, dest: {}", to); + for id in &group { + info!(" {}", id); + } + self.merge_fieldsets(ir, group, to, self.main.as_ref())?; + } + + Ok(()) + } + + fn merge_fieldsets( + &self, + ir: &mut IR, + ids: HashSet, + to: String, + main: Option<&String>, + ) -> anyhow::Result<()> { + let mut main_id = ids.iter().next().unwrap().clone(); + if let Some(main) = main { + let re = make_regex(main)?; + for id in ids.iter() { + if re.is_match(id) { + main_id = id.clone(); + break; + } + } + } + let fs = ir.fieldsets.get(&main_id).unwrap().clone(); + + for id in &ids { + let fs2 = ir.fieldsets.get(id).unwrap(); + check_mergeable_fieldsets(&fs, fs2, self.check)?; + } + + for id in &ids { + ir.fieldsets.remove(id); + } + + assert!(ir.fieldsets.insert(to.clone(), fs).is_none()); + replace_fieldset_ids(ir, &ids, to); + + Ok(()) + } +} diff --git a/src/transform/mod.rs b/src/transform/mod.rs new file mode 100644 index 000000000000..4e8715b80b07 --- /dev/null +++ b/src/transform/mod.rs @@ -0,0 +1,262 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +use crate::ir::*; +use crate::util::{ToSanitizedPascalCase, ToSanitizedSnakeCase, ToSanitizedUpperCase}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Sanitize {} + +impl Sanitize { + pub fn run(&self, ir: &mut IR) 
-> anyhow::Result<()> { + map_names(ir, |k, p| match k { + NameKind::Device => *p = sanitize_path(p), + NameKind::DevicePeripheral => *p = p.to_sanitized_upper_case().to_string(), + NameKind::DeviceInterrupt => *p = p.to_sanitized_upper_case().to_string(), + NameKind::Block => *p = sanitize_path(p), + NameKind::Fieldset => *p = sanitize_path(p), + NameKind::Enum => *p = sanitize_path(p), + NameKind::BlockItem => *p = p.to_sanitized_snake_case().to_string(), + NameKind::Field => *p = p.to_sanitized_snake_case().to_string(), + NameKind::EnumVariant => *p = p.to_sanitized_upper_case().to_string(), + }); + Ok(()) + } +} + +pub enum NameKind { + Device, + DevicePeripheral, + DeviceInterrupt, + Block, + BlockItem, + Fieldset, + Field, + Enum, + EnumVariant, +} + +fn rename_opt(s: &mut Option, f: impl Fn(&mut String)) { + if let Some(s) = s { + f(s) + } +} + +pub fn map_block_names(ir: &mut IR, f: impl Fn(&mut String)) { + remap_names(&mut ir.blocks, &f); + + for (_, d) in ir.devices.iter_mut() { + for p in &mut d.peripherals { + rename_opt(&mut p.block, &f); + } + } + + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + match &mut i.inner { + BlockItemInner::Block(p) => f(&mut p.block), + BlockItemInner::Register(_r) => {} + } + } + } +} + +pub fn map_fieldset_names(ir: &mut IR, f: impl Fn(&mut String)) { + remap_names(&mut ir.fieldsets, &f); + + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + match &mut i.inner { + BlockItemInner::Block(_p) => {} + BlockItemInner::Register(r) => rename_opt(&mut r.fieldset, &f), + } + } + } +} + +pub fn map_enum_names(ir: &mut IR, f: impl Fn(&mut String)) { + remap_names(&mut ir.enums, &f); + + for (_, fs) in ir.fieldsets.iter_mut() { + for ff in fs.fields.iter_mut() { + rename_opt(&mut ff.enum_read, &f); + rename_opt(&mut ff.enum_write, &f); + rename_opt(&mut ff.enum_readwrite, &f); + } + } +} + +pub fn map_device_names(ir: &mut IR, f: impl Fn(&mut String)) { + remap_names(&mut ir.devices, &f); +} + +pub fn map_device_interrupt_names(ir: &mut IR, f: impl Fn(&mut String)) { + for (_, d) in ir.devices.iter_mut() { + for i in &mut d.interrupts { + f(&mut i.name); + } + } +} + +pub fn map_device_peripheral_names(ir: &mut IR, f: impl Fn(&mut String)) { + for (_, d) in ir.devices.iter_mut() { + for p in &mut d.peripherals { + f(&mut p.name); + } + } +} + +pub fn map_block_item_names(ir: &mut IR, f: impl Fn(&mut String)) { + for (_, b) in ir.blocks.iter_mut() { + for i in b.items.iter_mut() { + f(&mut i.name) + } + } +} + +pub fn map_field_names(ir: &mut IR, f: impl Fn(&mut String)) { + for (_, fs) in ir.fieldsets.iter_mut() { + for ff in fs.fields.iter_mut() { + f(&mut ff.name) + } + } +} + +pub fn map_enum_variant_names(ir: &mut IR, f: impl Fn(&mut String)) { + for (_, e) in ir.enums.iter_mut() { + for v in e.variants.iter_mut() { + f(&mut v.name) + } + } +} + +pub fn map_names(ir: &mut IR, f: impl Fn(NameKind, &mut String)) { + map_device_names(ir, |s| f(NameKind::Device, s)); + map_device_peripheral_names(ir, |s| f(NameKind::DevicePeripheral, s)); + map_device_interrupt_names(ir, |s| f(NameKind::DeviceInterrupt, s)); + map_block_names(ir, |s| f(NameKind::Block, s)); + map_block_item_names(ir, |s| f(NameKind::BlockItem, s)); + map_fieldset_names(ir, |s| f(NameKind::Fieldset, s)); + map_field_names(ir, |s| f(NameKind::Field, s)); + map_enum_names(ir, |s| f(NameKind::Enum, s)); + map_enum_variant_names(ir, |s| f(NameKind::EnumVariant, s)); +} + +pub fn map_descriptions(ir: &mut IR, mut ff: impl FnMut(&str) -> String) -> 
anyhow::Result<()> { + let mut mapit = |d: &mut Option| { + *d = d.as_ref().map(|p| ff(p)); + }; + + for (_, b) in ir.blocks.iter_mut() { + mapit(&mut b.description); + for i in b.items.iter_mut() { + mapit(&mut i.description); + } + } + + for (_, fs) in ir.fieldsets.iter_mut() { + mapit(&mut fs.description); + for f in fs.fields.iter_mut() { + mapit(&mut f.description); + } + } + + for (_, e) in ir.enums.iter_mut() { + mapit(&mut e.description); + for v in e.variants.iter_mut() { + mapit(&mut v.description); + } + } + + Ok(()) +} + +fn remap_names(x: &mut HashMap, f: impl Fn(&mut String)) { + let mut res = HashMap::new(); + for (mut name, val) in x.drain() { + f(&mut name); + assert!(res.insert(name, val).is_none()) + } + *x = res +} + +fn sanitize_path(p: &str) -> String { + let v = p.split("::").collect::>(); + let len = v.len(); + v.into_iter() + .enumerate() + .map(|(i, s)| { + if i == len - 1 { + s.to_sanitized_pascal_case() + } else { + s.to_sanitized_snake_case() + } + }) + .collect::>() + .join("::") +} + +mod common; + +pub mod delete; +pub mod delete_enums; +pub mod delete_fieldsets; +//pub mod find_duplicate_enums; +//pub mod find_duplicate_fieldsets; +pub mod expand_extends; +pub mod make_block; +pub mod make_field_array; +pub mod make_register_array; +pub mod merge_blocks; +pub mod merge_enums; +pub mod merge_fieldsets; +pub mod rename; +pub mod rename_fields; +pub mod rename_registers; +pub mod sort; +pub mod modify_byte_offset; + +#[derive(Debug, Serialize, Deserialize)] +pub enum Transform { + Sanitize(Sanitize), + Sort(sort::Sort), + Delete(delete::Delete), + DeleteEnums(delete_enums::DeleteEnums), + DeleteFieldsets(delete_fieldsets::DeleteFieldsets), + MergeBlocks(merge_blocks::MergeBlocks), + MergeEnums(merge_enums::MergeEnums), + MergeFieldsets(merge_fieldsets::MergeFieldsets), + Rename(rename::Rename), + RenameFields(rename_fields::RenameFields), + RenameRegisters(rename_registers::RenameRegisters), + MakeRegisterArray(make_register_array::MakeRegisterArray), + MakeFieldArray(make_field_array::MakeFieldArray), + MakeBlock(make_block::MakeBlock), + ModifyByteOffset(modify_byte_offset::ModifyByteOffset), + //FindDuplicateEnums(find_duplicate_enums::FindDuplicateEnums), + //FindDuplicateFieldsets(find_duplicate_fieldsets::FindDuplicateFieldsets), +} + +impl Transform { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + match self { + Self::Sanitize(t) => t.run(ir), + Self::Sort(t) => t.run(ir), + Self::Delete(t) => t.run(ir), + Self::DeleteEnums(t) => t.run(ir), + Self::DeleteFieldsets(t) => t.run(ir), + Self::MergeBlocks(t) => t.run(ir), + Self::MergeEnums(t) => t.run(ir), + Self::MergeFieldsets(t) => t.run(ir), + Self::Rename(t) => t.run(ir), + Self::RenameFields(t) => t.run(ir), + Self::RenameRegisters(t) => t.run(ir), + Self::MakeRegisterArray(t) => t.run(ir), + Self::MakeFieldArray(t) => t.run(ir), + Self::MakeBlock(t) => t.run(ir), + Self::ModifyByteOffset(t) => t.run(ir), + //Self::FindDuplicateEnums(t) => t.run(ir), + //Self::FindDuplicateFieldsets(t) => t.run(ir), + } + } +} diff --git a/src/transform/modify_byte_offset.rs b/src/transform/modify_byte_offset.rs new file mode 100644 index 000000000000..a9ed456f340c --- /dev/null +++ b/src/transform/modify_byte_offset.rs @@ -0,0 +1,23 @@ +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct ModifyByteOffset { + pub block: String, + pub add_offset: u32, +} + +impl ModifyByteOffset { + pub fn run(&self, ir: &mut IR) -> 
anyhow::Result<()> { + let path_re = make_regex(&self.block)?; + for id in match_all(ir.blocks.keys().cloned(), &path_re) { + let b = ir.blocks.get_mut(&id).unwrap(); + for i in &mut b.items { + i.byte_offset += self.add_offset; + } + } + Ok(()) + } +} diff --git a/src/transform/rename.rs b/src/transform/rename.rs new file mode 100644 index 000000000000..05a07a17ff45 --- /dev/null +++ b/src/transform/rename.rs @@ -0,0 +1,29 @@ +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Rename { + pub from: String, + pub to: String, +} + +impl Rename { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = make_regex(&self.from)?; + + let renamer = |name: &mut String| { + if let Some(res) = match_expand(name, &re, &self.to) { + *name = res + } + }; + + super::map_device_names(ir, &renamer); + super::map_block_names(ir, &renamer); + super::map_fieldset_names(ir, &renamer); + super::map_enum_names(ir, &renamer); + + Ok(()) + } +} diff --git a/src/transform/rename_fields.rs b/src/transform/rename_fields.rs new file mode 100644 index 000000000000..df76c155b200 --- /dev/null +++ b/src/transform/rename_fields.rs @@ -0,0 +1,27 @@ +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct RenameFields { + pub fieldset: String, + pub from: String, + pub to: String, +} + +impl RenameFields { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let path_re = make_regex(&self.fieldset)?; + let re = make_regex(&self.from)?; + for id in match_all(ir.fieldsets.keys().cloned(), &path_re) { + let fs = ir.fieldsets.get_mut(&id).unwrap(); + for f in &mut fs.fields { + if let Some(name) = match_expand(&f.name, &re, &self.to) { + f.name = name; + } + } + } + Ok(()) + } +} diff --git a/src/transform/rename_registers.rs b/src/transform/rename_registers.rs new file mode 100644 index 000000000000..5fa0f1e3c10b --- /dev/null +++ b/src/transform/rename_registers.rs @@ -0,0 +1,27 @@ +use serde::{Deserialize, Serialize}; + +use super::common::*; +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct RenameRegisters { + pub block: String, + pub from: String, + pub to: String, +} + +impl RenameRegisters { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let path_re = make_regex(&self.block)?; + let re = make_regex(&self.from)?; + for id in match_all(ir.blocks.keys().cloned(), &path_re) { + let b = ir.blocks.get_mut(&id).unwrap(); + for i in &mut b.items { + if let Some(name) = match_expand(&i.name, &re, &self.to) { + i.name = name; + } + } + } + Ok(()) + } +} diff --git a/src/transform/sort.rs b/src/transform/sort.rs new file mode 100644 index 000000000000..03173ec11c38 --- /dev/null +++ b/src/transform/sort.rs @@ -0,0 +1,22 @@ +use serde::{Deserialize, Serialize}; + +use crate::ir::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Sort {} + +impl Sort { + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + for z in ir.blocks.values_mut() { + z.items.sort_by_key(|i| (i.byte_offset, i.name.clone())) + } + for z in ir.fieldsets.values_mut() { + z.fields.sort_by_key(|i| (i.bit_offset, i.name.clone())) + } + for z in ir.enums.values_mut() { + z.variants.sort_by_key(|i| (i.value, i.name.clone())) + } + + Ok(()) + } +} diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 000000000000..5e1995583a68 --- /dev/null +++ b/src/util.rs @@ -0,0 +1,316 @@ +use anyhow::{anyhow, Result}; +use 
inflections::Inflect; +use proc_macro2::{Ident, Literal, Span, TokenStream}; +use quote::{quote, ToTokens}; +use std::{borrow::Cow, str::FromStr}; + +pub const BITS_PER_BYTE: u32 = 8; + +/// List of chars that some vendors use in their peripheral/field names but +/// that are not valid in Rust ident +const BLACKLIST_CHARS: &[char] = &['(', ')', '[', ']', '/', ' ', '-']; + +pub trait ToSanitizedPascalCase { + fn to_sanitized_pascal_case(&self) -> Cow; +} + +pub trait ToSanitizedUpperCase { + fn to_sanitized_upper_case(&self) -> Cow; +} + +pub trait ToSanitizedSnakeCase { + fn to_sanitized_snake_case(&self) -> Cow; +} + +impl ToSanitizedSnakeCase for str { + fn to_sanitized_snake_case(&self) -> Cow { + macro_rules! keywords { + ($s:expr, $($kw:ident),+,) => { + Cow::from(match &$s.to_lowercase()[..] { + $(stringify!($kw) => concat!(stringify!($kw), "_")),+, + _ => return Cow::from($s.to_snake_case()) + }) + } + } + + let s = self.replace(BLACKLIST_CHARS, ""); + + match s.chars().next().unwrap_or('\0') { + '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => { + Cow::from(format!("_{}", s.to_snake_case())) + } + _ => { + keywords! { + s, + abstract, + alignof, + as, + async, + await, + become, + box, + break, + const, + continue, + crate, + do, + else, + enum, + extern, + false, + final, + fn, + for, + if, + impl, + in, + let, + loop, + macro, + match, + mod, + move, + mut, + offsetof, + override, + priv, + proc, + pub, + pure, + ref, + return, + self, + sizeof, + static, + struct, + super, + trait, + true, + try, + type, + typeof, + unsafe, + unsized, + use, + virtual, + where, + while, + yield, + set_bit, + clear_bit, + bit, + bits, + } + } + } + } +} + +impl ToSanitizedUpperCase for str { + fn to_sanitized_upper_case(&self) -> Cow { + let s = self.replace(BLACKLIST_CHARS, ""); + + match s.chars().next().unwrap_or('\0') { + '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => { + Cow::from(format!("_{}", s.to_upper_case())) + } + _ => Cow::from(s.to_upper_case()), + } + } +} + +impl ToSanitizedPascalCase for str { + fn to_sanitized_pascal_case(&self) -> Cow { + let s = self.replace(BLACKLIST_CHARS, ""); + + match s.chars().next().unwrap_or('\0') { + '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => { + Cow::from(format!("_{}", s.to_pascal_case())) + } + _ => Cow::from(s.to_pascal_case()), + } + } +} + +pub fn respace(s: &str) -> String { + s.split_whitespace().collect::>().join(" ") +} + +pub fn escape_brackets(s: &str) -> String { + s.split('[') + .fold("".to_string(), |acc, x| { + if acc.is_empty() { + x.to_string() + } else if acc.ends_with('\\') { + acc + "[" + x + } else { + acc + "\\[" + x + } + }) + .split(']') + .fold("".to_string(), |acc, x| { + if acc.is_empty() { + x.to_string() + } else if acc.ends_with('\\') { + acc + "]" + x + } else { + acc + "\\]" + x + } + }) +} + +pub fn replace_suffix(name: &str, suffix: &str) -> String { + if name.contains("[%s]") { + name.replace("[%s]", suffix) + } else { + name.replace("%s", suffix) + } +} + +/// Turns `n` into an unsuffixed separated hex token +pub fn hex(n: u64) -> TokenStream { + let (h4, h3, h2, h1) = ( + (n >> 48) & 0xffff, + (n >> 32) & 0xffff, + (n >> 16) & 0xffff, + n & 0xffff, + ); + TokenStream::from_str( + &(if h4 != 0 { + format!("0x{:04x}_{:04x}_{:04x}_{:04x}", h4, h3, h2, h1) + } else if h3 != 0 { + format!("0x{:04x}_{:04x}_{:04x}", h3, h2, h1) + } else if h2 != 0 { + format!("0x{:04x}_{:04x}", h2, h1) + } else if h1 & 0xff00 != 0 { + format!("0x{:04x}", h1) + } else if h1 != 0 { + 
format!("0x{:02x}", h1 & 0xff) + } else { + "0".to_string() + }), + ) + .unwrap() +} + +/// Turns `n` into an unsuffixed token +pub fn unsuffixed(n: u64) -> TokenStream { + Literal::u64_unsuffixed(n).into_token_stream() +} + +pub fn unsuffixed_or_bool(n: u64, width: u32) -> TokenStream { + if width == 1 { + Ident::new(if n == 0 { "false" } else { "true" }, Span::call_site()).into_token_stream() + } else { + unsuffixed(n) + } +} + +pub trait U32Ext { + fn to_ty(&self) -> Result; + fn to_ty_width(&self) -> Result; +} + +impl U32Ext for u32 { + fn to_ty(&self) -> Result { + Ok(Ident::new( + match *self { + 1 => "bool", + 2..=8 => "u8", + 9..=16 => "u16", + 17..=32 => "u32", + 33..=64 => "u64", + _ => { + return Err(anyhow!( + "can't convert {} bits into a Rust integral type", + *self + )) + } + }, + Span::call_site(), + )) + } + + fn to_ty_width(&self) -> Result { + Ok(match *self { + 1 => 1, + 2..=8 => 8, + 9..=16 => 16, + 17..=32 => 32, + 33..=64 => 64, + _ => { + return Err(anyhow!( + "can't convert {} bits into a Rust integral type width", + *self + )) + } + }) + } +} + +pub fn build_rs() -> TokenStream { + quote! { + use std::env; + use std::fs::File; + use std::io::Write; + use std::path::PathBuf; + + fn main() { + if env::var_os("CARGO_FEATURE_RT").is_some() { + // Put the linker script somewhere the linker can find it + let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap()); + File::create(out.join("device.x")) + .unwrap() + .write_all(include_bytes!("device.x")) + .unwrap(); + println!("cargo:rustc-link-search={}", out.display()); + + println!("cargo:rerun-if-changed=device.x"); + } + + println!("cargo:rerun-if-changed=build.rs"); + } + } +} + +/// Return a relative path to access a from b. +pub fn relative_path(a: &str, b: &str) -> TokenStream { + let a: Vec<&str> = a.split("::").collect(); + let b: Vec<&str> = b.split("::").collect(); + + let mut ma = &a[..a.len() - 1]; + let mut mb = &b[..b.len() - 1]; + while !ma.is_empty() && !mb.is_empty() && ma[0] == mb[0] { + ma = &ma[1..]; + mb = &mb[1..]; + } + + let mut res = TokenStream::new(); + + // for each item left in b, append a `super` + for _ in mb { + res.extend(quote!(super::)); + } + + // for each item in a, append it + for ident in ma { + let ident = Ident::new(ident, Span::call_site()); + res.extend(quote!(#ident::)); + } + + let ident = Ident::new(a[a.len() - 1], Span::call_site()); + res.extend(quote!(#ident)); + + res +} + +pub fn doc(doc: &Option) -> TokenStream { + if let Some(doc) = doc { + let doc = doc.replace("\\n", "\n"); + let doc = respace(&doc); + let doc = escape_brackets(&doc); + quote!(#[doc=#doc]) + } else { + quote!() + } +} From 7973e4987702bf7246187f86c3781d3bb7d84802 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Wed, 31 Aug 2022 17:48:51 -0400 Subject: [PATCH 02/15] Remove nightly toolchain file Using stable. 
--- raltool/rust-toolchain.toml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 raltool/rust-toolchain.toml diff --git a/raltool/rust-toolchain.toml b/raltool/rust-toolchain.toml deleted file mode 100644 index 06d6aa18d7e9..000000000000 --- a/raltool/rust-toolchain.toml +++ /dev/null @@ -1,5 +0,0 @@ -# Before upgrading check that everything is available on all tier1 targets here: -# https://rust-lang.github.io/rustup-components-history -[toolchain] -channel = "nightly-2022-03-10" -components = [ "rust-src", "rustfmt" ] From 9fee0f9512cdf9545447de1aaf0f61412ebe1db9 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Fri, 2 Sep 2022 15:51:20 -0400 Subject: [PATCH 03/15] Add support for non-u32 bitfields The BitSize newtype helped with refactoring, and could be removed if deemed too noisy. After this commit, the size of a set of fields is based on the size of the register. Before this commit, the registers for an MIMXRT PWM peripheral were all incorrectly u32s. After this commit, I observed that code generated for an MIMXRT PWM peripheral used u16 throughout. It still might use u8 for individual fields, but that doesn't affect the MMIO register interactions. Only tested by extracting peripherals, then generating blocks based on the YAML output; haven't tested a full SVD -> PAC transform, but I would expect that to work. --- raltool/src/generate/block.rs | 10 ++++---- raltool/src/generate/enumm.rs | 10 ++++---- raltool/src/generate/fieldset.rs | 37 +++++++++++++-------------- raltool/src/ir.rs | 20 +++++---------- raltool/src/main.rs | 4 +-- raltool/src/svd2ir.rs | 19 +++++++++----- raltool/src/transform/common.rs | 4 +-- raltool/src/transform/delete_enums.rs | 2 +- raltool/src/transform/mod.rs | 2 +- 9 files changed, 54 insertions(+), 54 deletions(-) diff --git a/raltool/src/generate/block.rs b/raltool/src/generate/block.rs index 115104f41246..5b508eca1c27 100644 --- a/raltool/src/generate/block.rs +++ b/raltool/src/generate/block.rs @@ -25,11 +25,11 @@ pub fn render(opts: &super::Options, ir: &IR, b: &Block, path: &str) -> Result quote!(u8), - 16 => quote!(u16), - 32 => quote!(u32), - 64 => quote!(u64), - _ => panic!("Invalid register bit size {}", r.bit_size), + BitSize(8) => quote!(u8), + BitSize(16) => quote!(u16), + BitSize(32) => quote!(u32), + BitSize(64) => quote!(u64), + BitSize(invalid) => panic!("Invalid register bit size {invalid}"), } }; diff --git a/raltool/src/generate/enumm.rs b/raltool/src/generate/enumm.rs index e51bb6f110e9..ae24230e4f6d 100644 --- a/raltool/src/generate/enumm.rs +++ b/raltool/src/generate/enumm.rs @@ -11,11 +11,11 @@ pub fn render(_opts: &super::Options, _ir: &IR, e: &Enum, path: &str) -> Result< let mut items = TokenStream::new(); let ty = match e.bit_size { - 1..=8 => quote!(u8), - 9..=16 => quote!(u16), - 17..=32 => quote!(u32), - 33..=64 => quote!(u64), - _ => panic!("Invalid bit_size {}", e.bit_size), + BitSize(1..=8) => quote!(u8), + BitSize(9..=16) => quote!(u16), + BitSize(17..=32) => quote!(u32), + BitSize(33..=64) => quote!(u64), + BitSize(invalid) => panic!("Invalid bit_size {invalid}"), }; for f in &e.variants { diff --git a/raltool/src/generate/fieldset.rs b/raltool/src/generate/fieldset.rs index 262eab8b9023..a1fcfe337565 100644 --- a/raltool/src/generate/fieldset.rs +++ b/raltool/src/generate/fieldset.rs @@ -11,19 +11,18 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res let mut items = TokenStream::new(); let ty = match fs.bit_size { - 1..=8 => quote!(u8), - 9..=16 => quote!(u16), - 17..=32 => 
quote!(u32), - 33..=64 => quote!(u64), - _ => panic!("Invalid bit_size {}", fs.bit_size), + BitSize(1..=8) => quote!(u8), + BitSize(9..=16) => quote!(u16), + BitSize(17..=32) => quote!(u32), + BitSize(33..=64) => quote!(u64), + BitSize(invalid) => panic!("Invalid bit_size {invalid}"), }; for f in &fs.fields { let name = Ident::new(&f.name, span); let name_set = Ident::new(&format!("set_{}", f.name), span); let bit_offset = f.bit_offset as usize; - let _bit_size = f.bit_size as usize; - let mask = util::hex(1u64.wrapping_shl(f.bit_size).wrapping_sub(1)); + let mask = util::hex(1u64.wrapping_shl(f.bit_size.0).wrapping_sub(1)); let doc = util::doc(&f.description); let field_ty: TokenStream; let to_bits: TokenStream; @@ -33,11 +32,11 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res let e = ir.enums.get(e_path).unwrap(); let enum_ty = match e.bit_size { - 1..=8 => quote!(u8), - 9..=16 => quote!(u16), - 17..=32 => quote!(u32), - 33..=64 => quote!(u64), - _ => panic!("Invalid bit_size {}", e.bit_size), + BitSize(1..=8) => quote!(u8), + BitSize(9..=16) => quote!(u16), + BitSize(17..=32) => quote!(u32), + BitSize(33..=64) => quote!(u64), + BitSize(invalid) => panic!("Invalid bit_size {invalid}"), }; field_ty = util::relative_path(e_path, path); @@ -45,15 +44,15 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res from_bits = quote!(#field_ty(val as #enum_ty)); } else { field_ty = match f.bit_size { - 1 => quote!(bool), - 2..=8 => quote!(u8), - 9..=16 => quote!(u16), - 17..=32 => quote!(u32), - 33..=64 => quote!(u64), - _ => panic!("Invalid bit_size {}", f.bit_size), + BitSize(1) => quote!(bool), + BitSize(2..=8) => quote!(u8), + BitSize(9..=16) => quote!(u16), + BitSize(17..=32) => quote!(u32), + BitSize(33..=64) => quote!(u64), + BitSize(invalid) => panic!("Invalid bit_size {invalid}"), }; to_bits = quote!(val as #ty); - from_bits = if f.bit_size == 1 { + from_bits = if f.bit_size == BitSize(1) { quote!(val != 0) } else { quote!(val as #field_ty) diff --git a/raltool/src/ir.rs b/raltool/src/ir.rs index 62045d2710e6..ea7a717e0459 100644 --- a/raltool/src/ir.rs +++ b/raltool/src/ir.rs @@ -112,12 +112,14 @@ pub struct CursedArray { pub offsets: Vec, } +#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +pub struct BitSize(pub u32); + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Register { #[serde(default = "default_readwrite", skip_serializing_if = "is_readwrite")] pub access: Access, - #[serde(default = "default_32", skip_serializing_if = "is_32")] - pub bit_size: u32, + pub bit_size: BitSize, #[serde(default, skip_serializing_if = "Option::is_none")] pub fieldset: Option, } @@ -140,8 +142,7 @@ pub struct FieldSet { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option, - #[serde(default = "default_32", skip_serializing_if = "is_32")] - pub bit_size: u32, + pub bit_size: BitSize, pub fields: Vec, } @@ -152,7 +153,7 @@ pub struct Field { pub description: Option, pub bit_offset: u32, - pub bit_size: u32, + pub bit_size: BitSize, #[serde(default, skip_serializing_if = "Option::is_none")] pub array: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -167,7 +168,7 @@ pub struct Field { pub struct Enum { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option, - pub bit_size: u32, + pub bit_size: BitSize, pub variants: Vec, } @@ -179,13 +180,6 @@ pub struct EnumVariant { pub value: u64, } -fn default_32() -> u32 { - 32 -} -fn 
is_32(x: &u32) -> bool { - *x == 32 -} - fn default_readwrite() -> Access { Access::ReadWrite } diff --git a/raltool/src/main.rs b/raltool/src/main.rs index 0b35c0846b49..42f2ca7ed640 100755 --- a/raltool/src/main.rs +++ b/raltool/src/main.rs @@ -297,8 +297,8 @@ fn check(args: Check) -> Result<()> { for (name, f) in &ir.fieldsets { for (i1, i2) in Pairs::new(f.fields.iter()) { - if i2.bit_offset + i2.bit_size > i1.bit_offset - && i1.bit_offset + i1.bit_size > i2.bit_offset + if i2.bit_offset + i2.bit_size.0 > i1.bit_offset + && i1.bit_offset + i1.bit_size.0 > i2.bit_offset { error(format!( "fieldset {}: fields overlap: {} {}", diff --git a/raltool/src/svd2ir.rs b/raltool/src/svd2ir.rs index 6cc3ea2402bf..40da1fd736bb 100644 --- a/raltool/src/svd2ir.rs +++ b/raltool/src/svd2ir.rs @@ -14,14 +14,14 @@ struct ProtoBlock { struct ProtoFieldset { name: Vec, description: Option, - bit_size: u32, + bit_size: BitSize, fields: Vec, } struct ProtoEnum { name: Vec, usage: Option, - bit_size: u32, + bit_size: BitSize, variants: Vec, } @@ -44,13 +44,20 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<() continue; } + let fieldset_bitsize = match r { + svd::Register::Single(info) => info.size, + svd::Register::Array(info, _) => info.size, + } + .map(BitSize) + .expect("Unsized register is not supported"); + if let Some(fields) = &r.fields { let mut fieldset_name = block.name.clone(); fieldset_name.push(util::replace_suffix(&r.name, "")); fieldsets.push(ProtoFieldset { name: fieldset_name.clone(), description: r.description.clone(), - bit_size: 32, // todo + bit_size: fieldset_bitsize, fields: fields.clone(), }); @@ -73,7 +80,7 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<() enums.push(ProtoEnum { name: enum_name, usage: e.usage, - bit_size: f.bit_range.width, + bit_size: fieldset_bitsize, variants: e.values.clone(), }); } @@ -137,7 +144,7 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<() byte_offset: r.address_offset, inner: BlockItemInner::Register(Register { access, // todo - bit_size: r.size.unwrap_or(32), + bit_size: BitSize(r.size.expect("Must have a bitsize")), fieldset: fieldset_name.clone(), }), }; @@ -198,7 +205,7 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<() name: f.name.clone(), description: f.description.clone(), bit_offset: f.bit_range.offset, - bit_size: f.bit_range.width, + bit_size: BitSize(f.bit_range.width), array: None, enum_read: None, enum_write: None, diff --git a/raltool/src/transform/common.rs b/raltool/src/transform/common.rs index 29bc4e0fe9eb..15e82bf2b33b 100644 --- a/raltool/src/transform/common.rs +++ b/raltool/src/transform/common.rs @@ -33,7 +33,7 @@ pub(crate) fn check_mergeable_enums_inner( level: CheckLevel, ) -> anyhow::Result<()> { if a.bit_size != b.bit_size { - bail!("Different bit size: {} vs {}", a.bit_size, b.bit_size) + bail!("Different bit size: {} vs {}", a.bit_size.0, b.bit_size.0) } if level >= CheckLevel::Layout { @@ -123,7 +123,7 @@ pub(crate) fn check_mergeable_fieldsets_inner( level: CheckLevel, ) -> anyhow::Result<()> { if a.bit_size != b.bit_size { - bail!("Different bit size: {} vs {}", a.bit_size, b.bit_size) + bail!("Different bit size: {} vs {}", a.bit_size.0, b.bit_size.0) } if level >= CheckLevel::Layout { diff --git a/raltool/src/transform/delete_enums.rs b/raltool/src/transform/delete_enums.rs index 3db1906c0f6a..e5334648497a 100644 --- a/raltool/src/transform/delete_enums.rs +++ 
b/raltool/src/transform/delete_enums.rs @@ -8,7 +8,7 @@ use crate::ir::*; #[derive(Debug, Serialize, Deserialize)] pub struct DeleteEnums { pub from: String, - pub bit_size: Option, + pub bit_size: Option, #[serde(default)] pub soft: bool, } diff --git a/raltool/src/transform/mod.rs b/raltool/src/transform/mod.rs index 4e8715b80b07..38fb7cfffee5 100644 --- a/raltool/src/transform/mod.rs +++ b/raltool/src/transform/mod.rs @@ -210,11 +210,11 @@ pub mod make_register_array; pub mod merge_blocks; pub mod merge_enums; pub mod merge_fieldsets; +pub mod modify_byte_offset; pub mod rename; pub mod rename_fields; pub mod rename_registers; pub mod sort; -pub mod modify_byte_offset; #[derive(Debug, Serialize, Deserialize)] pub enum Transform { From 8a3d79982e75ba718af43fc7dba7f0f2a08a9562 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sun, 11 Sep 2022 13:01:57 -0400 Subject: [PATCH 04/15] Add support for combining IRs Combine lets us consolidate enums, fieldsets, and blocks across devices. Given multiple IRs, combine figures out the various versions of these IR elements, then creates a new IR that shares as much as it can. The heuristics for combining these IR elements is encoded in the combine module. --- raltool/src/combine.rs | 441 +++++++++++++++++++++++++++++++++++++++++ raltool/src/lib.rs | 1 + raltool/src/svd2ir.rs | 2 +- 3 files changed, 443 insertions(+), 1 deletion(-) create mode 100644 raltool/src/combine.rs diff --git a/raltool/src/combine.rs b/raltool/src/combine.rs new file mode 100644 index 000000000000..810618dcf922 --- /dev/null +++ b/raltool/src/combine.rs @@ -0,0 +1,441 @@ +//! Helper types to combine and consolidate IRs across devices. + +use crate::ir; +use std::{ + cmp::Ordering, + collections::{hash_map::Entry, HashMap}, +}; + +/// An element version. +pub struct Version<'ir, E> { + /// Reference to the element. + elem: &'ir E, + /// The IRs that use this version. + irs: Vec<&'ir ir::IR>, +} + +impl<'ir, E> Version<'ir, E> { + fn new(elem: &'ir E, ir: &'ir ir::IR) -> Self { + Self { + elem, + irs: vec![ir], + } + } + + /// Acquire the IR element. + pub fn element(&self) -> &'ir E { + self.elem + } + + /// Returns `true` if the provided IR uses this element version. + /// + /// This uses a pointer comparison to understand if the IRs are equal. + /// It does not use any (Partial)Eq trait. + pub fn is_used_by(&self, ir: &ir::IR) -> bool { + self.irs.iter().any(|jr| std::ptr::eq(ir, *jr)) + } +} + +/// A version of an enum. +pub type EnumVersion<'ir> = Version<'ir, ir::Enum>; +/// A version of a field set. +pub type FieldSetVersion<'ir> = Version<'ir, ir::FieldSet>; +/// A version of a block. +pub type BlockVersion<'ir> = Version<'ir, ir::Block>; + +/// Multiple versions of some element. +type Versions<'ir, E> = Vec>; + +/// Used to sort versions by most popular (most IR associations) to least +/// popular (fewest IR associations). +fn popularity(a: &Version<'_, E>, b: &Version<'_, E>) -> Ordering { + b.irs.len().cmp(&a.irs.len()) +} + +#[derive(Clone, Copy)] +struct CompareIr<'ir, E> { + elem: &'ir E, + ir: &'ir ir::IR, +} + +impl<'ir, E> CompareIr<'ir, E> { + fn from_version(version: &Version<'ir, E>) -> Self { + Self::new( + version.element(), + version.irs.first().expect("Versions always have an IR"), + ) + } + fn new(elem: &'ir E, ir: &'ir ir::IR) -> Self { + Self { elem, ir } + } + fn query(ir: &'ir ir::IR, query: impl FnOnce(&'ir ir::IR) -> Option<&'ir E>) -> Option { + query(ir).map(|elem| Self::new(elem, ir)) + } +} + +/// Assert two elements as equivalent. 
+/// +/// The implementation invokes this callback for similarly-named things +/// across IRs. For instance, the input will always be two UART +/// blocks from two different devices. You'll never see an UART and an +/// I2C block being compared for equivalence (unless your IR is really +/// messed up). +type Equivalence = fn(CompareIr, CompareIr) -> bool; + +/// Ensure the items in two, possibly non-sorted contiguous +/// collections are equivalent. +fn equivalent_slices(xs: &[E], ys: &[E], equiv: impl Fn(&E, &E) -> bool) -> bool { + xs.len() == ys.len() && xs.iter().all(|x| ys.iter().any(|y| equiv(x, y))) +} + +fn equivalent_options( + a: Option>, + b: Option>, + equiv: Equivalence, +) -> bool { + match (a, b) { + (Some(a), Some(b)) => equiv(a, b), + (None, None) => true, + (_, _) => false, + } +} + +/// Check if two enums are equivalent. +fn equivalent_enum( + CompareIr { elem: a, .. }: CompareIr, + CompareIr { elem: b, .. }: CompareIr, +) -> bool { + a.bit_size == b.bit_size + && equivalent_slices(&a.variants, &b.variants, |q, r| q.value == r.value) +} + +/// Check if two fieldsets are equivalent. +fn equivalent_fieldsets( + CompareIr { elem: a, ir: air }: CompareIr, + CompareIr { elem: b, ir: bir }: CompareIr, +) -> bool { + let try_equivalent_enum = |a: &Option, b: &Option| -> bool { + let a = a + .as_ref() + .and_then(|a| CompareIr::query(air, |ir| ir.enums.get(a))); + let b = b + .as_ref() + .and_then(|b| CompareIr::query(bir, |ir| ir.enums.get(b))); + equivalent_options(a, b, equivalent_enum) + }; + + a.bit_size == b.bit_size + && equivalent_slices(&a.fields, &b.fields, |q, r| { + q.bit_offset == r.bit_offset + && q.array == r.array + && q.bit_size == r.bit_size + && try_equivalent_enum(&q.enum_read, &r.enum_read) + && try_equivalent_enum(&q.enum_write, &r.enum_write) + && try_equivalent_enum(&q.enum_readwrite, &r.enum_readwrite) + }) +} + +fn equivalent_registers( + CompareIr { elem: a, ir: air }: CompareIr, + CompareIr { elem: b, ir: bir }: CompareIr, +) -> bool { + let query_builder = + |ir| move |fieldset: &String| CompareIr::query(ir, |ir| ir.fieldsets.get(fieldset)); + + a.access == b.access + && a.bit_size == b.bit_size + && equivalent_options( + a.fieldset.as_ref().and_then(query_builder(air)), + b.fieldset.as_ref().and_then(query_builder(bir)), + equivalent_fieldsets, + ) +} + +/// Check if two blocks are equivalent. +fn equivalent_blocks( + CompareIr { elem: a, ir: air }: CompareIr, + CompareIr { elem: b, ir: bir }: CompareIr, +) -> bool { + a.extends == b.extends + && equivalent_slices(&a.items, &b.items, |q, r| { + q.byte_offset == r.byte_offset + && q.array == r.array + && match (&q.inner, &r.inner) { + ( + ir::BlockItemInner::Block(ir::BlockItemBlock { block: ablock }), + ir::BlockItemInner::Block(ir::BlockItemBlock { block: bblock }), + ) => equivalent_blocks( + CompareIr::query(air, |ir| ir.blocks.get(ablock)).unwrap(), + CompareIr::query(bir, |ir| ir.blocks.get(bblock)).unwrap(), + ), + ( + ir::BlockItemInner::Register(aregister), + ir::BlockItemInner::Register(bregister), + ) => equivalent_registers( + CompareIr::new(aregister, air), + CompareIr::new(bregister, bir), + ), + _ => false, + } + }) +} + +/// Manages versions for an IR element type. +struct VersionLookup<'ir, E> { + versions: HashMap<&'ir str, Versions<'ir, E>>, +} + +impl<'ir, E> VersionLookup<'ir, E> { + /// Create new version lookups for an IR's elements. 
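+ /// Entries that share a path are grouped into versions: each version records the IRs that use it, and a path's versions stay sorted from most to least popular.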
+ fn new( + equiv: Equivalence, + map: impl Iterator, + ) -> Self { + let versions = map.fold( + HashMap::new(), + |mut versions: HashMap<&'ir str, Versions<'ir, E>>, (ir, path, elem)| { + versions + .entry(path.as_str()) + .and_modify(|versions| { + if let Some(version) = versions.iter_mut().find(|version| { + (equiv)(CompareIr::from_version(version), CompareIr::new(elem, ir)) + }) { + version.irs.push(ir); + } else { + versions.push(Version::new(elem, ir)) + } + }) + .or_insert_with(|| vec![Version::new(elem, ir)]) + .sort_unstable_by(popularity); + versions + }, + ); + Self { versions } + } + + fn from_irs( + equiv: Equivalence, + irs: &'ir [ir::IR], + access: impl Fn(&'ir ir::IR) -> &HashMap, + ) -> Self { + let map = irs + .iter() + .flat_map(|ir| std::iter::repeat(ir).zip(access(ir).iter())) + .map(|(ir, (path, elem))| (ir, path, elem)); + Self::new(equiv, map) + } + + fn get(&self, ir: &ir::IR, path: &str) -> Option<&Version> { + self.versions + .get(path) + .and_then(|versions| versions.iter().find(|version| version.is_used_by(ir))) + } +} + +/// Manages versions of IR elements. +/// +/// The implementation uses the address of the IR when querying for versioned elements. +/// This should be fine, since the implementation takes shared references to the IR, so +/// things can't (safely) move or be reassigned while this exists. +pub struct IrVersions<'ir> { + enums: VersionLookup<'ir, ir::Enum>, + fieldsets: VersionLookup<'ir, ir::FieldSet>, + blocks: VersionLookup<'ir, ir::Block>, +} + +impl<'ir> IrVersions<'ir> { + /// Define versions of IR elements from the collection of IRs. + pub fn from_irs(irs: &'ir [ir::IR]) -> Self { + Self { + enums: VersionLookup::from_irs(equivalent_enum, irs, |ir| &ir.enums), + fieldsets: VersionLookup::from_irs(equivalent_fieldsets, irs, |ir| &ir.fieldsets), + blocks: VersionLookup::from_irs(equivalent_blocks, irs, |ir| &ir.blocks), + } + } + /// Access an enum version that corresponds to this IR. + pub fn get_enum(&self, ir: &ir::IR, path: &str) -> Option<&EnumVersion> { + self.enums.get(ir, path) + } + /// Access a fieldset version that corresponds to this IR. + pub fn get_fieldset(&self, ir: &ir::IR, path: &str) -> Option<&FieldSetVersion> { + self.fieldsets.get(ir, path) + } + /// Access a block version that corresponds to this IR. + pub fn get_block(&self, ir: &ir::IR, path: &str) -> Option<&BlockVersion> { + self.blocks.get(ir, path) + } +} + +/// Hashing a reference by its address. +struct RefHash<'a, T>(&'a T); + +impl std::hash::Hash for RefHash<'_, T> { + fn hash(&self, state: &mut H) { + std::ptr::hash(self.0, state); + } +} + +impl std::cmp::PartialEq for RefHash<'_, T> { + fn eq(&self, other: &Self) -> bool { + std::ptr::eq(self.0, other.0) + } +} + +impl std::cmp::Eq for RefHash<'_, T> {} + +impl Clone for RefHash<'_, T> { + fn clone(&self) -> Self { + Self(self.0) + } +} + +impl Copy for RefHash<'_, T> {} + +type RefMap<'a, K, V> = HashMap, V>; + +/// Combine all IRs into a single IR. +pub fn combine(irs: &[ir::IR]) -> ir::IR { + assert!( + irs.iter().all(|ir| !ir.devices.is_empty()), + "Cannot combine an IR with empty devices." + ); + assert!( + irs.iter().all(|ir| ir.devices.len() == 1), + "Sorry, not ready to combine IRs that were already combined" + ); + { + let device_names: Vec<_> = irs + .iter() + .map(|ir| ir.devices.keys().next().unwrap()) + .collect(); + assert!( + device_names.len() == irs.len(), + "Each IR must describe a unique device." 
+ ); + assert!( + device_names.iter().all(|name| !name.is_empty()), + "Each device needs a name." + ); + } + + let versions = IrVersions::from_irs(irs); + + let mut consolidated = ir::IR::new(); + + // Combine enums. + let mut enums: RefMap = RefMap::new(); + for ir in irs { + let device_name = ir.devices.keys().next().expect("Each IR has a name"); + + for path in ir.enums.keys() { + let version = versions + .get_enum(ir, path) + .expect("There's definitely a version"); + + if let Entry::Vacant(entry) = enums.entry(RefHash(version.element())) { + let path = format!("{device_name}::{path}"); + entry.insert(path.clone()); + consolidated.enums.insert(path, version.element().clone()); + } + } + } + + // Combine fieldsets. + let mut fieldsets: RefMap = RefMap::new(); + for ir in irs { + let device_name = ir.devices.keys().next().unwrap(); + + for path in ir.fieldsets.keys() { + let version = versions.get_fieldset(ir, path).unwrap(); + + if let Entry::Vacant(entry) = fieldsets.entry(RefHash(version.element())) { + let path = format!("{device_name}::{path}"); + entry.insert(path.clone()); + + let mut fieldset = version.element().clone(); + // Fix references to enums by looking up the version, then mapping it to + // the updated path. + for field in &mut fieldset.fields { + for name in [ + field.enum_readwrite.as_mut(), + field.enum_read.as_mut(), + field.enum_write.as_mut(), + ] + .into_iter() + .flatten() + { + let version = versions.get_enum(ir, name).unwrap(); + *name = enums.get(&RefHash(version.element())).unwrap().into(); + } + } + consolidated.fieldsets.insert(path, fieldset); + } + } + } + + // Combine blocks. + // + // Block consolidation uses two passes, since a block + // can have a reference to another block. The first pass + // manages the version -> rename mapping, and the second + // pass does the touch-up. + let mut blocks: RefMap = RefMap::new(); + for ir in irs { + let device_name = ir.devices.keys().next().unwrap(); + + for path in ir.blocks.keys() { + let version = versions.get_block(ir, path).unwrap(); + + if let Entry::Vacant(entry) = blocks.entry(RefHash(version.element())) { + let path = format!("{device_name}::{path}"); + entry.insert(path.clone()); + consolidated.blocks.insert(path, version.element().clone()); + } + } + } + + let blocks = blocks; + // Remove from this to ensure patches only happens once. + let mut filter = blocks.clone(); + for ir in irs { + for path in ir.blocks.keys() { + let version = versions.get_block(ir, path).unwrap(); + + if let Some(path) = filter.get(&RefHash(version.element())) { + let block = consolidated.blocks.get_mut(path).unwrap(); + for item in &mut block.items { + match &mut item.inner { + ir::BlockItemInner::Register(reg) => { + for fieldset in &mut reg.fieldset { + let version = versions.get_fieldset(ir, fieldset).unwrap(); + *fieldset = + fieldsets.get(&RefHash(version.element())).unwrap().into() + } + } + ir::BlockItemInner::Block(ir::BlockItemBlock { block }) => { + let version = versions.get_block(ir, block).unwrap(); + *block = blocks.get(&RefHash(version.element())).unwrap().into(); + } + } + } + } + filter.remove(&RefHash(version.element())); + } + } + + // Update all devices to point to new blocks. 
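+ // Each device still refers to its blocks by the original per-device paths, so map every peripheral's block name through the version lookup to the consolidated path before copying the device in.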
+ for ir in irs { + let mut devices = ir.devices.clone(); + devices + .values_mut() + .flat_map(|device| device.peripherals.iter_mut()) + .flat_map(|peripheral| &mut peripheral.block) + .for_each(|name: &mut String| { + let version = versions.get_block(ir, name).unwrap(); + *name = blocks.get(&RefHash(version.element())).unwrap().into(); + }); + consolidated.devices.extend(devices); + } + + consolidated +} diff --git a/raltool/src/lib.rs b/raltool/src/lib.rs index f7aff2166151..40bf901074a4 100755 --- a/raltool/src/lib.rs +++ b/raltool/src/lib.rs @@ -1,3 +1,4 @@ +pub mod combine; pub mod generate; pub mod ir; pub mod svd2ir; diff --git a/raltool/src/svd2ir.rs b/raltool/src/svd2ir.rs index 40da1fd736bb..f43ca1c325fe 100644 --- a/raltool/src/svd2ir.rs +++ b/raltool/src/svd2ir.rs @@ -338,7 +338,7 @@ pub fn convert_svd(svd: &svd::Device) -> anyhow::Result { } } - ir.devices.insert("".to_string(), device); + ir.devices.insert(svd.name.clone(), device); transform::sort::Sort {}.run(&mut ir).unwrap(); transform::Sanitize {}.run(&mut ir).unwrap(); From 17d43a40cccef7b273fc7cd7161d9cb974ba9093 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Wed, 21 Sep 2022 14:35:40 -0400 Subject: [PATCH 05/15] Rename peripheral device names in the transformer --- raltool/src/transform/rename.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/raltool/src/transform/rename.rs b/raltool/src/transform/rename.rs index 05a07a17ff45..b92884d67785 100644 --- a/raltool/src/transform/rename.rs +++ b/raltool/src/transform/rename.rs @@ -23,6 +23,7 @@ impl Rename { super::map_block_names(ir, &renamer); super::map_fieldset_names(ir, &renamer); super::map_enum_names(ir, &renamer); + super::map_device_peripheral_names(ir, &renamer); Ok(()) } From 541e2e034a0b65eedcbd654fe194627f19552e9f Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Wed, 21 Sep 2022 21:19:45 -0400 Subject: [PATCH 06/15] Simplify block module names When a block is used across multiple peripheral instances, remove any number suffix. --- raltool/src/svd2ir.rs | 1 + raltool/src/transform/mod.rs | 64 +++++++++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/raltool/src/svd2ir.rs b/raltool/src/svd2ir.rs index f43ca1c325fe..28831e5928ba 100644 --- a/raltool/src/svd2ir.rs +++ b/raltool/src/svd2ir.rs @@ -342,6 +342,7 @@ pub fn convert_svd(svd: &svd::Device) -> anyhow::Result { transform::sort::Sort {}.run(&mut ir).unwrap(); transform::Sanitize {}.run(&mut ir).unwrap(); + transform::SimplifyPaths::new().run(&mut ir).unwrap(); Ok(ir) } diff --git a/raltool/src/transform/mod.rs b/raltool/src/transform/mod.rs index 38fb7cfffee5..7ff21c685739 100644 --- a/raltool/src/transform/mod.rs +++ b/raltool/src/transform/mod.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use crate::ir::*; use crate::util::{ToSanitizedPascalCase, ToSanitizedSnakeCase, ToSanitizedUpperCase}; @@ -24,6 +24,7 @@ impl Sanitize { } } +#[derive(PartialEq, Eq)] pub enum NameKind { Device, DevicePeripheral, @@ -260,3 +261,64 @@ impl Transform { } } } + +/// A transform that removes extraneous numbers +/// from block paths that have multiple instances. +/// +/// If the IR uses paths that look like +/// +/// - `lpuart1::Lpuart2` +/// - `lpuart1::Lpuart7` +/// - etc. +/// +/// this transformer changes `lpuart1` to `lpuart`, +/// dropping the '1'. 
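+///
+/// Only blocks that are referenced by more than one peripheral, and whose root module name ends in digits, are renamed; a single-instance block keeps its numbered path.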
+pub struct SimplifyPaths(()); +impl SimplifyPaths { + pub fn new() -> Self { + SimplifyPaths(()) + } + pub fn run(&self, ir: &mut IR) -> anyhow::Result<()> { + let re = regex::Regex::new(r"\d+$")?; + + let mut block_to_peripherals: HashMap<&str, usize> = HashMap::new(); + for device in ir.devices.values() { + for peripheral in &device.peripherals { + *block_to_peripherals + .entry(peripheral.block.as_ref().unwrap()) + .or_insert(0) += 1; + } + } + + let renames: HashSet = block_to_peripherals + .into_iter() + .filter(|(_, count)| *count > 1) + .filter(|(path, _)| { + let root = path.split("::").next().unwrap(); + re.is_match(root) + }) + .map(|(path, _)| path.split("::").next().unwrap().into()) + .collect(); + + map_names(ir, |_, name| { + let mut parts = name.split("::"); + if let Some(root) = parts.next() { + if renames.contains(root) { + let new_root = re.replace(root, ""); + *name = std::iter::once(&*new_root) + .chain(parts) + .collect::>() + .join("::"); + } + } + }); + + Ok(()) + } +} + +impl Default for SimplifyPaths { + fn default() -> Self { + Self::new() + } +} From 15e271293c28378c5244f01bd1a8cbf531de8d99 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Tue, 13 Sep 2022 16:14:05 -0400 Subject: [PATCH 07/15] Generate a RAL-like API Remove the strong types, favoring a RAL-like pattern for accessing register fields and enums. This is achieved by nesting the render calls during codegen. --- raltool/Cargo.lock | 1 + raltool/Cargo.toml | 1 + raltool/build.rs | 51 --- raltool/src/generate/block.rs | 614 ++++++++++++++++++++++++++----- raltool/src/generate/common.rs | 86 ----- raltool/src/generate/device.rs | 134 +++++-- raltool/src/generate/enumm.rs | 47 --- raltool/src/generate/fieldset.rs | 132 ++----- raltool/src/generate/mod.rs | 220 ++++++----- raltool/src/ir.rs | 9 +- raltool/src/main.rs | 53 ++- raltool/src/svd2ir.rs | 5 +- raltool/src/transform/mod.rs | 4 +- raltool/src/util.rs | 10 +- 14 files changed, 870 insertions(+), 497 deletions(-) delete mode 100644 raltool/build.rs delete mode 100644 raltool/src/generate/common.rs delete mode 100644 raltool/src/generate/enumm.rs diff --git a/raltool/Cargo.lock b/raltool/Cargo.lock index 8d6097d99bed..76b8c0f0f73d 100644 --- a/raltool/Cargo.lock +++ b/raltool/Cargo.lock @@ -57,6 +57,7 @@ dependencies = [ "log", "proc-macro2", "quote", + "rayon", "regex", "serde", "serde_yaml", diff --git a/raltool/Cargo.toml b/raltool/Cargo.toml index 612bb3e742c3..5854963a6bc4 100644 --- a/raltool/Cargo.toml +++ b/raltool/Cargo.toml @@ -12,6 +12,7 @@ log = { version = "~0.4", features = ["std"] } quote = "1.0" proc-macro2 = "1.0" anyhow = "1.0.19" +rayon = "1.5" regex = "1.4.3" serde = { version = "1.0.123", features = [ "derive" ]} serde_yaml = "0.8.15" diff --git a/raltool/build.rs b/raltool/build.rs deleted file mode 100644 index 56ee3e0a19a6..000000000000 --- a/raltool/build.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::env; -use std::error::Error; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; -use std::process::Command; - -struct IgnoredError {} - -impl From for IgnoredError -where - E: Error, -{ - fn from(_: E) -> IgnoredError { - IgnoredError {} - } -} - -fn main() { - let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); - - File::create(out_dir.join("commit-info.txt")) - .unwrap() - .write_all(commit_info().as_bytes()) - .unwrap(); -} - -fn commit_info() -> String { - match (commit_hash(), commit_date()) { - (Ok(hash), Ok(date)) => format!(" ({} {})", hash.trim(), date.trim()), - _ => String::new(), - } -} - -fn 
commit_hash() -> Result { - Ok(String::from_utf8( - Command::new("git") - .args(&["rev-parse", "--short", "HEAD"]) - .output()? - .stdout, - )?) -} - -fn commit_date() -> Result { - Ok(String::from_utf8( - Command::new("git") - .args(&["log", "-1", "--date=short", "--pretty=format:%cd"]) - .output()? - .stdout, - )?) -} diff --git a/raltool/src/generate/block.rs b/raltool/src/generate/block.rs index 5b508eca1c27..72d44103810e 100644 --- a/raltool/src/generate/block.rs +++ b/raltool/src/generate/block.rs @@ -1,106 +1,556 @@ +//! Generates a register block, along with submodules for register fields. +//! +//! Recursively expands dependent blocks that are part of the module. This +//! means that the input to [`render`] is expected to be a root block, or +//! a block that is not a sub-block of another block. + +use std::num::NonZeroUsize; + use anyhow::Result; use proc_macro2::TokenStream; use proc_macro2::{Ident, Span}; use quote::quote; -use crate::ir::*; +use crate::ir; use crate::util; -pub fn render(opts: &super::Options, ir: &IR, b: &Block, path: &str) -> Result { - let common_path = opts.common_path(); +/// A primitive size for a (reserved) register. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[repr(usize)] +enum Size { + U8 = 8, + U16 = 16, + U32 = 32, + U64 = 64, +} + +impl Size { + const fn bits(self) -> usize { + self as usize + } + const fn bytes(self) -> usize { + self.bits() / 8 + } + fn type_token(self) -> TokenStream { + match self { + Self::U8 => quote!(u8), + Self::U16 => quote!(u16), + Self::U32 => quote!(u32), + Self::U64 => quote!(u64), + } + } + fn from_bit_size(bit_size: ir::BitSize) -> Self { + match bit_size { + ir::BitSize(8) => Self::U8, + ir::BitSize(16) => Self::U16, + ir::BitSize(32) => Self::U32, + ir::BitSize(64) => Self::U64, + ir::BitSize(invalid) => panic!("Invalid register bit size {invalid}"), + } + } +} + +/// A register block. +/// +/// Any stride necessary to meet the requirements of a cluster array +/// are implicitly expressed with a final reservation member at the back +/// of the members collection. Meaning: the stride and size of the block +/// are equal. +/// +/// A single `Block` allocation holds the layout for all of its dependent +/// sub-blocks. To find sub-block layouts, scan the `members` for a block, +/// and recurse. +#[derive(Debug)] +struct Block<'a> { + /// Module name. + module: String, + /// Type documentation + doc: Option<&'a str>, + /// Members. + /// + /// If a block requires sub-blocks, they're contained within + /// this collection. + members: Members<'a>, +} - let span = Span::call_site(); - let mut items = TokenStream::new(); +/// Produces a collection of block members with reservations. +/// +/// This is where struct layout happens. +fn layout_members<'ir>( + items: &'ir [ir::BlockItem], + ir: &'ir ir::IR, + reservation_id: &mut usize, +) -> Members<'ir> { + let mut registers: Vec<_> = items + .iter() + .flat_map(|item| Member::expand(item, ir)) + .collect(); - for i in &b.items { - let name = Ident::new(&i.name, span); - let offset = i.byte_offset as usize; + // Order by their location in the block. + // + // If registers are at the same location, prefer the alias that + // is read-write. + registers.sort_by(|left, right| { + let offsets = left.offset().cmp(&right.offset()); + match (left, right) { + (Member::Register(left), Member::Register(right)) => { + offsets.then(left.access.cmp(&right.access)) + } + _ => offsets, + } + }); - let doc = util::doc(&i.description); + // Drop aliasing registers. 
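+ // dedup keeps only the first register at each offset; the sort above places the preferred (read-write) alias first, so that is the one that survives.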
+ registers.dedup_by(|left, right| left.offset() == right.offset()); - match &i.inner { - BlockItemInner::Register(r) => { - let reg_ty = if let Some(fieldset_path) = &r.fieldset { - let _f = ir.fieldsets.get(fieldset_path).unwrap(); - util::relative_path(fieldset_path, path) - } else { - match r.bit_size { - BitSize(8) => quote!(u8), - BitSize(16) => quote!(u16), - BitSize(32) => quote!(u32), - BitSize(64) => quote!(u64), - BitSize(invalid) => panic!("Invalid register bit size {invalid}"), + // Insert reservations. + let mut members: Vec = Vec::new(); + for register in registers { + let offset = members + .last() + .map(|mem| mem.offset() + mem.size_bytes()) + .unwrap_or(0usize); + if offset != register.offset() { + assert!(register.offset() > offset); + members.push(Member::Reserved { + len: register.offset() - offset, + offset, + id: *reservation_id, + }); + *reservation_id += 1; + } + members.push(register) + } + + members +} + +/// Sanity check of the block layout. +/// +/// Panics if there's an issue. +#[cfg(debug_assertions)] +fn check_layout(members: &[Member], path: &str) { + // Expand registers, modeling each as a range. + type RegRange = std::ops::Range; + + fn recurse(members: &[Member], registers: &mut Vec, global_offset: usize) { + for member in members { + match member { + Member::Register(reg) => { + for idx in 0..reg.len { + registers.push(RegRange { + start: global_offset + reg.offset + idx * reg.size.bytes(), + end: global_offset + reg.offset + (idx + 1) * reg.size.bytes(), + }) } - }; + } + Member::Reserved { len, offset, .. } => registers.push(RegRange { + start: global_offset + *offset, + end: global_offset + *offset + *len, + }), + Member::Block { + block, len, offset, .. + } => { + for idx in 0..*len { + recurse( + &block.members, + registers, + global_offset + *offset + idx * block.size_bytes(), + ); + } + } + } + } + } - let access = match r.access { - Access::Read => quote!(#common_path::R), - Access::Write => quote!(#common_path::W), - Access::ReadWrite => quote!(#common_path::RW), - }; + let mut registers: Vec = Vec::new(); + recurse(members, &mut registers, 0); - let ty = quote!(#common_path::Reg<#reg_ty, #access>); - if let Some(array) = &i.array { - let (len, offs_expr) = super::process_array(array); - items.extend(quote!( - #doc - #[inline(always)] - pub fn #name(self, n: usize) -> #ty { - assert!(n < #len); - unsafe { #common_path::Reg::from_ptr(self.0.add(#offset + #offs_expr)) } - } - )); - } else { - items.extend(quote!( - #doc - #[inline(always)] - pub fn #name(self) -> #ty { - unsafe { #common_path::Reg::from_ptr(self.0.add(#offset)) } - } - )); + for (idx, reg) in registers.iter().enumerate() { + for (jdx, seg) in registers.iter().enumerate() { + if idx != jdx { + for r in reg.clone() { + if seg.contains(&r) { + panic!( + r#"{members:#?} +There's an issue in the '{path}' block layout(s). +This routine flattens registers from blocks, ensuring +that there's no register overlap. If you're reading this +panic message, it's because there's likely overlap. +The questionable block is printed above this message. +Evaluate its layout, and compare it with the SVD. 
+"# + ); + } } } - BlockItemInner::Block(b) => { - let block_path = &b.block; - let _b2 = ir.blocks.get(block_path).unwrap(); - let ty = util::relative_path(block_path, path); - if let Some(array) = &i.array { - let (len, offs_expr) = super::process_array(array); - - items.extend(quote!( - #doc - #[inline(always)] - pub fn #name(self, n: usize) -> #ty { - assert!(n < #len); - unsafe { #ty(self.0.add(#offset + #offs_expr)) } - } - )); + } + } +} + +impl<'ir> Block<'ir> { + /// Allocate a new block. + /// + /// `path` is the IR path, and `block` is the associated block. A stride + /// of `None` prevents the routine from inserting any padding at the back + /// of the block to meet a cluster stride. + fn new( + path: &'ir str, + block: &'ir ir::Block, + ir: &'ir ir::IR, + stride: Option, + ) -> Self { + let module = path.split("::").last().unwrap().to_lowercase(); + let mut reservation_id = 0usize; + let members = layout_members(&block.items, ir, &mut reservation_id); + + let mut block = Self { + module, + doc: block.description.as_ref().map(String::as_ref), + members, + }; + + // Jam some padding in the back to meet the stride. + if let Some(stride) = stride { + let size = block.size_bytes(); + assert!( + stride.get() >= size, + "Expecting that we need to insert padding or do nothing, but it seems we need to take it away...?" + ); + + let padding_bytes = stride.get() - size; + if padding_bytes > 0 { + block.members.push(Member::Reserved { + id: reservation_id, + len: padding_bytes, + offset: block + .members + .last() + .map(|mem| mem.offset() + mem.size_bytes()) + .unwrap_or(0usize), + }); + } + } + + #[cfg(debug_assertions)] + check_layout(&block.members, path); + + block + } +} + +impl Block<'_> { + fn size_bytes(&self) -> usize { + self.members.iter().map(|mem| mem.size_bytes()).sum() + } + fn render_into(&self, tokens: &mut TokenStream) { + let members = self + .members + .iter() + .fold(TokenStream::new(), |mut tokens, member| { + member.render_into(&mut tokens); + tokens + }); + let doc = util::doc(&self.doc.map(ToString::to_string)); + tokens.extend(quote! { + #doc + #[repr(C)] + pub struct RegisterBlock { + #members + } + }); + } + fn subblocks(&self) -> impl Iterator { + self.members.iter().filter_map(|mem| match mem { + Member::Block { block, .. } => Some(block), + _ => None, + }) + } + fn registers(&self) -> impl Iterator { + self.members.iter().filter_map(|mem| match mem { + Member::Register(reg) => Some(reg), + _ => None, + }) + } +} + +/// A register (array). +#[derive(Debug)] +struct Register<'a> { + /// Register name. + /// + /// Expands to the struct member name. + name: String, + /// Size of the register. + size: Size, + /// How may registers? + /// + /// If `len` is one, the implementation emits a scalar (non-array) + /// type. + len: usize, + /// Optional documentation. + doc: Option<&'a str>, + /// Access. + access: ir::Access, + /// Key to the associated fieldset. + fieldset: Option<&'a str>, + /// Offset of this register within the block. + offset: usize, +} + +/// A struct member. +#[derive(Debug)] +enum Member<'a> { + /// A useful register. + Register(Register<'a>), + /// A reserved register. + /// + /// Always a byte array with some `len` of bytes. + Reserved { + /// Arbitrary ID for the reserved register. + /// + /// Assigned when specifying the block. Only + /// used to generate a unique identifier for the + /// member name. + id: usize, + /// How many bytes to reserve. + len: usize, + /// Byte position in the block. 
+ offset: usize, + }, + /// A cluster, or another register subblock. + Block { + /// Register layout for the block. + block: Block<'a>, + /// How many subblocks? Always greater than zero. + /// + /// If one, the implementation emits a single struct + /// instead of an array. + len: usize, + /// The member name. + /// + /// Differs from the module name; this expands to + /// the struct member identifier. + name: String, + /// The member documentation. + /// + /// Differs from the type documentation. + doc: Option<&'a str>, + /// Offset of this block within the parent block. + offset: usize, + }, +} +type Members<'a> = Vec>; + +impl<'ir> Member<'ir> { + /// Expands a block into one or more members. + /// + /// The returned collection is never empty. + fn expand(block_item: &'ir ir::BlockItem, ir: &'ir ir::IR) -> Vec { + let name = block_item.name.as_str(); + let offset = block_item.byte_offset as usize; + let doc = block_item.description.as_deref(); + + match (&block_item.array, &block_item.inner) { + // Individual register. + (None, ir::BlockItemInner::Register(reg)) => { + vec![Self::Register(Register { + name: name.into(), + size: Size::from_bit_size(reg.bit_size), + len: 1, + doc, + access: reg.access.clone(), + fieldset: reg.fieldset.as_deref(), + offset, + })] + } + // Array of registers with contiguous allocation. + ( + Some(ir::Array::Regular(ir::RegularArray { len, stride })), + ir::BlockItemInner::Register(reg), + ) if ir::BitSize(*stride * 8) == reg.bit_size => { + vec![Self::Register(Register { + name: name.into(), + size: Size::from_bit_size(reg.bit_size), + len: *len as usize, + doc, + access: reg.access.clone(), + fieldset: reg.fieldset.as_deref(), + offset, + })] + } + // "Array" of registers, but they're not contiguous. Describe them as + // individual registers. + ( + Some(ir::Array::Regular(ir::RegularArray { len, stride })), + ir::BlockItemInner::Register(reg), + ) => (0..*len as usize) + .map(|idx| { + Self::Register(Register { + name: format!("{name}{idx}"), + size: Size::from_bit_size(reg.bit_size), + len: 1, + doc, + access: reg.access.clone(), + fieldset: reg.fieldset.as_deref(), + offset: offset + idx * *stride as usize, + }) + }) + .collect(), + // A cluster. + ( + Some(ir::Array::Regular(ir::RegularArray { len, stride })), + ir::BlockItemInner::Block(ir::BlockItemBlock { block }), + ) => vec![Self::Block { + block: Block::new( + block, + ir.blocks.get(block).unwrap(), + ir, + NonZeroUsize::new(*stride as usize), + ), + len: *len as usize, + name: name.to_lowercase(), + doc, + offset, + }], + (Some(ir::Array::Cursed(_)), _) => { + panic!("Not yet handling a cursed array. I'd rather not spread the curse."); + } + (None, ir::BlockItemInner::Block(_)) => { + panic!("Unexpected cluster without a stride"); + } + } + } +} + +impl Member<'_> { + /// Returns the size of the member allocation. + fn size_bytes(&self) -> usize { + match self { + Self::Register(Register { size, len, .. }) => size.bytes() * len, + Self::Reserved { len, .. } => Size::U8.bytes() * len, + Self::Block { block, len, .. } => block.size_bytes() * *len, + } + } + + fn offset(&self) -> usize { + match self { + Self::Register(Register { offset, .. }) => *offset, + Self::Block { offset, .. } => *offset, + Self::Reserved { offset, .. } => *offset, + } + } + + /// Render this member into a token stream. + /// + /// This does not render a sub-block; it only inserts a member name and type + /// for the parent block (self). 
+ fn render_into(&self, tokens: &mut TokenStream) { + match self { + Self::Reserved { id, len, .. } => { + assert!(*len > 0, "There's at least one reservation"); + let ty = Size::U8.type_token(); + let reservation = quote::format_ident!("_reserved{}", *id); + let len = util::hex(*len as u64); + tokens.extend(quote! { + #reservation: [#ty; #len], + }); + } + Self::Register(Register { + name, + size, + len, + doc, + access, + .. + }) => { + assert!(*len > 0, "There's at least one register"); + let register = match access { + ir::Access::Read => quote!(crate::RORegister), + ir::Access::Write => quote!(crate::WORegister), + ir::Access::ReadWrite => quote!(crate::RWRegister), + }; + let reg_ty = size.type_token(); + let ty = if *len == 1 { + quote!(#register<#reg_ty>) } else { - items.extend(quote!( - #doc - #[inline(always)] - pub fn #name(self) -> #ty { - unsafe { #ty(self.0.add(#offset)) } - } - )); - } + quote!([#register<#reg_ty>; #len]) + }; + let span = Span::call_site(); + let name = Ident::new(name, span); + let doc = util::doc(&doc.map(ToString::to_string)); + tokens.extend(quote! { + #doc + pub #name: #ty, + }) + } + Self::Block { + len, + name, + doc, + block, + .. + } => { + assert!(*len > 0, "There's at least one block"); + let span = Span::call_site(); + let scalar = Ident::new(&block.module, span); + let ty = if *len == 1 { + quote!(#scalar::RegisterBlock) + } else { + quote!([#scalar::RegisterBlock; #len]) + }; + let doc = util::doc(&doc.map(ToString::to_string)); + let field = Ident::new(&name.to_uppercase(), span); + tokens.extend(quote! { + #doc + pub #field: #ty, + }) } } } +} - let (_, name) = super::split_path(path); - let name = Ident::new(name, span); - let doc = util::doc(&b.description); - let out = quote! { - #doc - #[derive(Copy, Clone, Eq, PartialEq)] - pub struct #name (pub *mut u8); - unsafe impl Send for #name {} - unsafe impl Sync for #name {} - impl #name { - #items +/// Renders a block module, recursing to render all sub-block modules. +fn render_module(block: &Block, ir: &ir::IR) -> Result { + let mut tokens = TokenStream::new(); + block.render_into(&mut tokens); + block.registers().try_for_each(|reg| -> Result<()> { + if let Some(fieldset) = reg + .fieldset + .as_ref() + .and_then(|fieldset| ir.fieldsets.get(*fieldset)) + { + let span = Span::call_site(); + let name = Ident::new(®.name, span); + let doc = util::doc(®.doc.map(ToString::to_string)); + let field_modules = super::fieldset::render(ir, fieldset)?; + tokens.extend(quote! { + #doc + pub mod #name { + #field_modules + } + }); } - }; + Ok(()) + })?; + + block.subblocks().try_for_each(|block| -> Result<()> { + let block_mod = render_module(block, ir)?; + + let span = Span::call_site(); + let mod_name = Ident::new(&block.module, span); + tokens.extend(quote! 
{ + pub mod #mod_name { + #block_mod + } + }); + Ok(()) + })?; + + Ok(tokens) +} - Ok(out) +pub fn render(ir: &ir::IR, b: &ir::Block, path: &str) -> Result { + let block = Block::new(path, b, ir, None); + render_module(&block, ir) } diff --git a/raltool/src/generate/common.rs b/raltool/src/generate/common.rs deleted file mode 100644 index 1b4282d98146..000000000000 --- a/raltool/src/generate/common.rs +++ /dev/null @@ -1,86 +0,0 @@ -use core::marker::PhantomData; - -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct RW; -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct R; -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct W; - -mod sealed { - use super::*; - pub trait Access {} - impl Access for R {} - impl Access for W {} - impl Access for RW {} -} - -pub trait Access: sealed::Access + Copy {} -impl Access for R {} -impl Access for W {} -impl Access for RW {} - -pub trait Read: Access {} -impl Read for RW {} -impl Read for R {} - -pub trait Write: Access {} -impl Write for RW {} -impl Write for W {} - -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct Reg { - ptr: *mut u8, - phantom: PhantomData<*mut (T, A)>, -} -unsafe impl Send for Reg {} -unsafe impl Sync for Reg {} - -impl Reg { - #[inline(always)] - pub fn from_ptr(ptr: *mut u8) -> Self { - Self { - ptr, - phantom: PhantomData, - } - } - - #[inline(always)] - pub fn ptr(&self) -> *mut T { - self.ptr as _ - } -} - -impl Reg { - #[inline(always)] - pub unsafe fn read(&self) -> T { - (self.ptr as *mut T).read_volatile() - } -} - -impl Reg { - #[inline(always)] - pub unsafe fn write_value(&self, val: T) { - (self.ptr as *mut T).write_volatile(val) - } -} - -impl Reg { - #[inline(always)] - pub unsafe fn write(&self, f: impl FnOnce(&mut T) -> R) -> R { - let mut val = Default::default(); - let res = f(&mut val); - self.write_value(val); - res - } -} - -impl Reg { - #[inline(always)] - pub unsafe fn modify(&self, f: impl FnOnce(&mut T) -> R) -> R { - let mut val = self.read(); - let res = f(&mut val); - self.write_value(val); - res - } -} diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs index 1428a8b1427f..02c41da28bdc 100644 --- a/raltool/src/generate/device.rs +++ b/raltool/src/generate/device.rs @@ -1,3 +1,5 @@ +use std::collections::BTreeMap; + use anyhow::Result; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; @@ -5,7 +7,8 @@ use quote::quote; use crate::ir::*; use crate::util::{self, ToSanitizedUpperCase}; -pub fn render(_opts: &super::Options, ir: &IR, d: &Device, path: &str) -> Result { +pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result { + let num_endings = regex::Regex::new(r"(\d+)$").unwrap(); let mut out = TokenStream::new(); let span = Span::call_site(); @@ -47,25 +50,110 @@ pub fn render(_opts: &super::Options, ir: &IR, d: &Device, path: &str) -> Result names.push(name_uc); } - for p in &d.peripherals { - let name = Ident::new(&p.name, span); - let address = util::hex(p.base_address as u64); - let doc = util::doc(&p.description); + let mut block_to_peripherals = BTreeMap::new(); + for peripheral in &d.peripherals { + let block_name = peripheral + .block + .as_ref() + .expect("All peripherals must have a block"); + let (block_path, _) = super::split_path(block_name); + let mod_name = block_path + .last() + .expect("There's a final component") + .to_string(); + block_to_peripherals + .entry(mod_name) + .or_insert_with(|| (block_path, Vec::new())) + .1 + .push(peripheral) + } - if let Some(block_name) = &p.block { - let _b = ir.blocks.get(block_name); - let path = 
util::relative_path(block_name, path); + for (mod_name, (block_path, periphs)) in block_to_peripherals { + let mut consts = TokenStream::new(); + for peripheral in periphs.iter() { + let name = Ident::new(&peripheral.name, span); + let address = util::hex(peripheral.base_address as u64); + let doc = util::doc(&peripheral.description); - peripherals.extend(quote! { - #doc - pub const #name: #path = #path(#address as u32 as _); - }); - } else { - peripherals.extend(quote! { + consts.extend(quote! { #doc - pub const #name: *mut () = #address as u32 as _; + pub const #name: *const RegisterBlock = #address as *const RegisterBlock; }); } + + let import = { + let block_path = block_path.join("/"); + const BLOCK_MOD: &str = super::BLOCK_MOD; + let module_path = format!("{BLOCK_MOD}/{block_path}.rs"); + quote! { + #[path = #module_path] + mod blocks; + pub use blocks::*; + } + }; + + let instances = if periphs.len() > 1 + && periphs + .iter() + .all(|periph| num_endings.is_match(&periph.name)) + { + let mut instances = TokenStream::new(); + for peripheral in periphs.iter() { + let name = Ident::new(&peripheral.name, span); + let num = num_endings.captures(&peripheral.name).unwrap(); + let num = util::unsuffixed( + num.get(1) + .and_then(|num| str::parse(num.as_str()).ok()) + .unwrap(), + ); + + instances.extend(quote! { + pub type #name = Instance<#num>; + impl crate::private::Sealed for #name {} + impl crate::Valid for #name {} + + impl #name { + pub const unsafe fn instance() -> Self { + Instance::new(#name) + } + } + }); + } + instances + } else { + assert!( + periphs.len() == 1, + r#"{periphs:#?} +Cannot generate this constified API when there's multiple, un-numbered peripherals. +The implementation doesn't automagically handle this right now. Until this is implemented, +you should use transforms to rename peripherals, putting numbers at the end of the peripheral +name."# + ); + let peripheral = periphs.first().unwrap(); + let name = Ident::new(&peripheral.name, span); + quote! { + pub type #name = Instance<{crate::SOLE_INSTANCE}>; + impl crate::private::Sealed for #name {} + impl crate::Valid for #name {} + impl #name { + pub const unsafe fn instance() -> Self { + Instance::new(#name) + } + } + } + }; + + let mod_name = Ident::new(&mod_name, span); + peripherals.extend(quote! { + #[path = "."] + pub mod #mod_name { + #consts + #import + + pub type Instance = crate::Instance; + #instances + } + }) } let n = util::unsuffixed(pos as u64); @@ -74,6 +162,7 @@ pub fn render(_opts: &super::Options, ir: &IR, d: &Device, path: &str) -> Result pub enum Interrupt { #interrupts } + pub type interrupt = Interrupt; unsafe impl cortex_m::interrupt::InterruptNumber for Interrupt { #[inline(always)] @@ -103,16 +192,13 @@ pub fn render(_opts: &super::Options, ir: &IR, d: &Device, path: &str) -> Result #peripherals )); - /* - if let Some(cpu) = d.cpu.as_ref() { - let bits = util::unsuffixed(u64::from(cpu.nvic_priority_bits)); + let cpu = d.cpu.as_ref().expect("There must be a CPU."); + let bits = util::unsuffixed(u64::from(cpu.nvic_priority_bits)); - out.extend(quote! { - ///Number available in the NVIC for configuring priority - pub const NVIC_PRIO_BITS: u8 = #bits; - }); - } - */ + out.extend(quote! 
{ + ///Number available in the NVIC for configuring priority + pub const NVIC_PRIO_BITS: u8 = #bits; + }); Ok(out) } diff --git a/raltool/src/generate/enumm.rs b/raltool/src/generate/enumm.rs deleted file mode 100644 index ae24230e4f6d..000000000000 --- a/raltool/src/generate/enumm.rs +++ /dev/null @@ -1,47 +0,0 @@ -use anyhow::Result; -use proc_macro2::TokenStream; -use proc_macro2::{Ident, Span}; -use quote::quote; - -use crate::ir::*; -use crate::util; - -pub fn render(_opts: &super::Options, _ir: &IR, e: &Enum, path: &str) -> Result { - let span = Span::call_site(); - let mut items = TokenStream::new(); - - let ty = match e.bit_size { - BitSize(1..=8) => quote!(u8), - BitSize(9..=16) => quote!(u16), - BitSize(17..=32) => quote!(u32), - BitSize(33..=64) => quote!(u64), - BitSize(invalid) => panic!("Invalid bit_size {invalid}"), - }; - - for f in &e.variants { - let name = Ident::new(&f.name, span); - let value = util::hex(f.value); - let doc = util::doc(&f.description); - items.extend(quote!( - #doc - pub const #name: Self = Self(#value); - )); - } - - let (_, name) = super::split_path(path); - let name = Ident::new(name, span); - let doc = util::doc(&e.description); - - let out = quote! { - #doc - #[repr(transparent)] - #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] - pub struct #name (pub #ty); - - impl #name { - #items - } - }; - - Ok(out) -} diff --git a/raltool/src/generate/fieldset.rs b/raltool/src/generate/fieldset.rs index a1fcfe337565..6d75788f5520 100644 --- a/raltool/src/generate/fieldset.rs +++ b/raltool/src/generate/fieldset.rs @@ -6,7 +6,7 @@ use quote::quote; use crate::ir::*; use crate::util; -pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Result { +pub fn render(ir: &IR, fs: &FieldSet) -> Result { let span = Span::call_site(); let mut items = TokenStream::new(); @@ -15,107 +15,55 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res BitSize(9..=16) => quote!(u16), BitSize(17..=32) => quote!(u32), BitSize(33..=64) => quote!(u64), - BitSize(invalid) => panic!("Invalid bit_size {invalid}"), + BitSize(invalid) => anyhow::bail!("Invalid bit_size {invalid}"), }; for f in &fs.fields { + anyhow::ensure!( + f.array.is_none(), + "Field {} is an array, and that's not supported", + f.name + ); + let name = Ident::new(&f.name, span); - let name_set = Ident::new(&format!("set_{}", f.name), span); - let bit_offset = f.bit_offset as usize; + let bit_offset = proc_macro2::Literal::u32_unsuffixed(f.bit_offset); let mask = util::hex(1u64.wrapping_shl(f.bit_size.0).wrapping_sub(1)); let doc = util::doc(&f.description); - let field_ty: TokenStream; - let to_bits: TokenStream; - let from_bits: TokenStream; - if let Some(e_path) = &f.enum_readwrite { - let e = ir.enums.get(e_path).unwrap(); + let enum_tokenize = |enm: &Option| -> TokenStream { + enm.as_ref() + .and_then(|path| ir.enums.get(path)) + .map(|enm| { + let mut items = TokenStream::new(); + for e in &enm.variants { + let name = Ident::new(&e.name, span); + let value = util::hex(e.value); + let doc = util::doc(&e.description); + items.extend(quote!( + #doc + pub const #name: #ty = #value; + )); + } + items + }) + .unwrap_or_else(TokenStream::new) + }; - let enum_ty = match e.bit_size { - BitSize(1..=8) => quote!(u8), - BitSize(9..=16) => quote!(u16), - BitSize(17..=32) => quote!(u32), - BitSize(33..=64) => quote!(u64), - BitSize(invalid) => panic!("Invalid bit_size {invalid}"), - }; + let reads = enum_tokenize(&f.enum_read); + let writes = 
enum_tokenize(&f.enum_write); + let reads_writes = enum_tokenize(&f.enum_readwrite); - field_ty = util::relative_path(e_path, path); - to_bits = quote!(val.0 as #ty); - from_bits = quote!(#field_ty(val as #enum_ty)); - } else { - field_ty = match f.bit_size { - BitSize(1) => quote!(bool), - BitSize(2..=8) => quote!(u8), - BitSize(9..=16) => quote!(u16), - BitSize(17..=32) => quote!(u32), - BitSize(33..=64) => quote!(u64), - BitSize(invalid) => panic!("Invalid bit_size {invalid}"), - }; - to_bits = quote!(val as #ty); - from_bits = if f.bit_size == BitSize(1) { - quote!(val != 0) - } else { - quote!(val as #field_ty) + items.extend(quote! { + #doc + pub mod #name { + pub const offset: #ty = #bit_offset; + pub const mask: #ty = #mask << offset; + pub mod R { #reads } + pub mod W { #writes } + pub mod RW { #reads_writes } } - } - - if let Some(array) = &f.array { - let (len, offs_expr) = super::process_array(array); - items.extend(quote!( - #doc - #[inline(always)] - pub fn #name(&self, n: usize) -> #field_ty{ - assert!(n < #len); - let offs = #bit_offset + #offs_expr; - let val = (self.0 >> offs) & #mask; - #from_bits - } - #doc - #[inline(always)] - pub fn #name_set(&mut self, n: usize, val: #field_ty) { - assert!(n < #len); - let offs = #bit_offset + #offs_expr; - self.0 = (self.0 & !(#mask << offs)) | (((#to_bits) & #mask) << offs); - } - )); - } else { - items.extend(quote!( - #doc - #[inline(always)] - pub const fn #name(&self) -> #field_ty{ - let val = (self.0 >> #bit_offset) & #mask; - #from_bits - } - #doc - #[inline(always)] - pub fn #name_set(&mut self, val: #field_ty) { - self.0 = (self.0 & !(#mask << #bit_offset)) | (((#to_bits) & #mask) << #bit_offset); - } - )); - } + }); } - let (_, name) = super::split_path(path); - let name = Ident::new(name, span); - let doc = util::doc(&fs.description); - - let out = quote! { - #doc - #[repr(transparent)] - #[derive(Copy, Clone, Eq, PartialEq)] - pub struct #name (pub #ty); - - impl #name { - #items - } - - impl Default for #name { - #[inline(always)] - fn default() -> #name { - #name(0) - } - } - }; - - Ok(out) + Ok(quote! 
{ #items }) } diff --git a/raltool/src/generate/mod.rs b/raltool/src/generate/mod.rs index 227283ff4a99..e2374f39717d 100644 --- a/raltool/src/generate/mod.rs +++ b/raltool/src/generate/mod.rs @@ -1,21 +1,23 @@ mod block; mod device; -mod enumm; mod fieldset; use anyhow::Result; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use std::collections::HashMap; -use std::str::FromStr; +use std::collections::{BTreeMap, HashSet}; +use std::fs; +use std::path::{Path, PathBuf}; use crate::ir::*; -pub const COMMON_MODULE: &[u8] = include_bytes!("common.rs"); - struct Module { items: TokenStream, - children: HashMap, + children: BTreeMap, + public: bool, + fs_only: bool, + reexport: bool, + conditional_feature: Option, } impl Module { @@ -23,10 +25,34 @@ impl Module { Self { // Default mod contents items: quote!(), - children: HashMap::new(), + children: BTreeMap::new(), + public: true, + fs_only: false, + reexport: false, + conditional_feature: None, } } + fn mark_private(&mut self) -> &mut Module { + self.public = false; + self + } + + fn mark_fs_only(&mut self) -> &mut Module { + self.fs_only = true; + self + } + + fn mark_reexport(&mut self) -> &mut Module { + self.reexport = true; + self + } + + fn conditional_on(&mut self, feature: &str) -> &mut Module { + self.conditional_feature = Some(feature.into()); + self + } + fn get_by_path(&mut self, path: &[&str]) -> &mut Module { if path.is_empty() { return self; @@ -38,7 +64,7 @@ impl Module { .get_by_path(&path[1..]) } - fn render(self) -> Result { + fn render(self, path: &Path) -> Result<()> { let span = Span::call_site(); let mut res = TokenStream::new(); @@ -46,14 +72,52 @@ impl Module { for (name, module) in self.children.into_iter() { let name = Ident::new(&name, span); - let contents = module.render()?; - res.extend(quote! { - pub mod #name { - #contents + + let subpath = if let Some(parent) = path.parent() { + if path.file_name() == Some(std::ffi::OsStr::new("lib.rs")) { + parent.join(format! {"{name}.rs"}) + } else { + parent + .join(path.file_stem().unwrap()) + .join(format!("{name}.rs")) } - }); + } else { + PathBuf::from(format!("{name}.rs")) + }; + + if !module.fs_only { + let privacy = if module.public { quote!(pub) } else { quote!() }; + let conditional = if let Some(feature) = &module.conditional_feature { + quote!(#[cfg(feature = #feature)]) + } else { + quote!() + }; + let reexport = if module.reexport { + quote!(pub use #name::*;) + } else { + quote!() + }; + module.render(&subpath)?; + let file_path = format!("{name}.rs"); + res.extend(quote! 
{ + #conditional + #[path = #file_path] + #privacy mod #name; + #conditional + #reexport + }); + } else { + module.render(&subpath)?; + } + } + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + if !self.fs_only { + fs::write(path, res.to_string().as_bytes())?; } - Ok(res) + Ok(()) } } @@ -63,84 +127,81 @@ pub enum CommonModule { } pub struct Options { - pub common_module: CommonModule, + pub module_root: PathBuf, } -impl Options { - fn common_path(&self) -> TokenStream { - match &self.common_module { - CommonModule::Builtin => TokenStream::from_str("crate::common").unwrap(), - CommonModule::External(path) => path.clone(), - } - } -} +const BLOCK_MOD: &str = "blocks"; -pub fn render(ir: &IR, opts: &Options) -> Result { +pub fn render(ir: &IR, opts: &Options) -> Result<()> { let mut root = Module::new(); root.items = TokenStream::new(); // Remove default contents + root.get_by_path(&[BLOCK_MOD]).mark_fs_only(); - let commit_info = { - let tmp = include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")); + root.items.extend(quote!( + #![no_std] + #![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] + + pub use ral_registers::{RWRegister, RORegister, WORegister, read_reg, write_reg, modify_reg}; - if tmp.is_empty() { - " (untracked)" - } else { - tmp + pub struct Instance { + ptr: *const T, } - }; - let doc = format!( - "Peripheral access API (generated using chiptool v{}{})", - env!("CARGO_PKG_VERSION"), - commit_info - ); + impl core::ops::Deref for Instance { + type Target = T; + fn deref(&self) -> &Self::Target { + unsafe { &*self.ptr } + } + } - root.items.extend(quote!( - #![no_std] - #![doc=#doc] + impl Instance { + pub const unsafe fn new(ptr: *const T) -> Self { + Self { ptr } + } + } + + unsafe impl Send for Instance {} + + pub const SOLE_INSTANCE: u8 = 0u8; + mod private { + pub trait Sealed {} + } + pub trait Valid : private::Sealed {} )); + let mut root_blocks = HashSet::new(); for (p, d) in ir.devices.iter() { - let (mods, _) = split_path(p); + root_blocks.extend( + d.peripherals + .iter() + .filter_map(|peripheral| peripheral.block.as_ref()), + ); + let mods = p.split("::").collect::>(); root.get_by_path(&mods) .items - .extend(device::render(opts, ir, d, p)?); + .extend(device::render(opts, ir, d)?); } - for (p, b) in ir.blocks.iter() { - let (mods, _) = split_path(p); - root.get_by_path(&mods) + for root_block in root_blocks { + let b = ir.blocks.get(root_block).unwrap(); + let (mods, _) = split_path(root_block); + root.get_by_path(&[BLOCK_MOD]) + .get_by_path(&mods) .items - .extend(block::render(opts, ir, b, p)?); + .extend(block::render(ir, b, root_block)?); } - for (p, fs) in ir.fieldsets.iter() { - let (mods, _) = split_path(p); - root.get_by_path(&mods) - .items - .extend(fieldset::render(opts, ir, fs, p)?); + for (dev_mod_name, dev_mod) in root.children.iter_mut().filter(|(k, _)| *k != BLOCK_MOD) { + dev_mod + .mark_private() + .conditional_on(dev_mod_name) + .mark_reexport(); } - - for (p, e) in ir.enums.iter() { - let (mods, _) = split_path(p); - root.get_by_path(&mods) - .items - .extend(enumm::render(opts, ir, e, p)?); + for block_dev_mod in root.get_by_path(&[BLOCK_MOD]).children.values_mut() { + block_dev_mod.mark_fs_only(); } - match &opts.common_module { - CommonModule::Builtin => { - let tokens = - TokenStream::from_str(std::str::from_utf8(COMMON_MODULE).unwrap()).unwrap(); - - let module = root.get_by_path(&["common"]); - module.items = TokenStream::new(); // Remove default contents - module.items.extend(tokens); - 
} - CommonModule::External(_) => {} - } - - root.render() + root.render(&opts.module_root) } fn split_path(s: &str) -> (Vec<&str>, &str) { @@ -148,24 +209,3 @@ fn split_path(s: &str) -> (Vec<&str>, &str) { let n = v.pop().unwrap(); (v, n) } - -fn process_array(array: &Array) -> (usize, TokenStream) { - match array { - Array::Regular(array) => { - let len = array.len as usize; - let stride = array.stride as usize; - let offs_expr = quote!(n*#stride); - (len, offs_expr) - } - Array::Cursed(array) => { - let len = array.offsets.len(); - let offsets = array - .offsets - .iter() - .map(|&x| x as usize) - .collect::>(); - let offs_expr = quote!(([#(#offsets),*][n] as usize)); - (len, offs_expr) - } - } -} diff --git a/raltool/src/ir.rs b/raltool/src/ir.rs index ea7a717e0459..8dff903a2d4b 100644 --- a/raltool/src/ir.rs +++ b/raltool/src/ir.rs @@ -29,10 +29,17 @@ impl IR { } } +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Cpu { + pub nvic_priority_bits: u32, +} + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Device { pub peripherals: Vec, pub interrupts: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cpu: Option, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -128,7 +135,7 @@ pub struct BlockItemBlock { pub block: String, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] pub enum Access { ReadWrite, Read, diff --git a/raltool/src/main.rs b/raltool/src/main.rs index 42f2ca7ed640..53b9a0c8fd61 100755 --- a/raltool/src/main.rs +++ b/raltool/src/main.rs @@ -1,12 +1,14 @@ #![recursion_limit = "128"] use anyhow::{bail, Context, Result}; -use chiptool::{generate, svd2ir}; +use chiptool::{combine, generate, svd2ir}; use clap::Parser; use log::*; +use rayon::prelude::*; use regex::Regex; use std::fs; use std::io::Read; +use std::path::PathBuf; use std::{fs::File, io::stdout}; use chiptool::ir::IR; @@ -59,12 +61,14 @@ struct Transform { /// Generate a PAC directly from a SVD #[derive(Parser)] struct Generate { - /// SVD file path - #[clap(long)] - svd: String, + /// SVD file path(s) + svds: Vec, /// Transforms file path #[clap(long)] transform: Option, + /// Directory for the output. + #[clap(long, default_value_t = String::from("src"))] + output_directory: String, } /// Reformat a YAML @@ -168,29 +172,40 @@ fn extract_peripheral(args: ExtractPeripheral) -> Result<()> { Ok(()) } -fn gen(args: Generate) -> Result<()> { +fn gen(mut args: Generate) -> Result<()> { let config = match args.transform { Some(s) => load_config(&s)?, None => Config::default(), }; - let svd = load_svd(&args.svd)?; - let mut ir = svd2ir::convert_svd(&svd)?; - // Fix weird newline spam in descriptions. 
let re = Regex::new("[ \n]+").unwrap(); - chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; - for t in &config.transforms { - info!("running: {:?}", t); - t.run(&mut ir)?; - } + args.svds.sort_unstable(); + let irs: Vec = args + .svds + .par_iter() + .map(|svd| -> Result { + let svd = load_svd(&svd)?; + let mut ir = svd2ir::convert_svd(&svd)?; + + chiptool::transform::map_descriptions(&mut ir, |d| { + re.replace_all(d, " ").into_owned() + })?; + + for t in &config.transforms { + t.run(&mut ir)?; + } + Ok(ir) + }) + .collect::>()?; let generate_opts = generate::Options { - common_module: generate::CommonModule::Builtin, + module_root: PathBuf::from(args.output_directory).join("lib.rs"), }; - let items = generate::render(&ir, &generate_opts).unwrap(); - fs::write("lib.rs", items.to_string())?; + + let combination = combine::combine(&irs); + generate::render(&combination, &generate_opts)?; Ok(()) } @@ -321,11 +336,9 @@ fn gen_block(args: GenBlock) -> Result<()> { chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); let generate_opts = generate::Options { - common_module: generate::CommonModule::Builtin, + module_root: std::path::PathBuf::from(&args.output), }; - let items = generate::render(&ir, &generate_opts).unwrap(); - fs::write(&args.output, items.to_string())?; - + generate::render(&ir, &generate_opts)?; Ok(()) } #[derive(serde::Serialize, serde::Deserialize)] diff --git a/raltool/src/svd2ir.rs b/raltool/src/svd2ir.rs index 28831e5928ba..993df3df8607 100644 --- a/raltool/src/svd2ir.rs +++ b/raltool/src/svd2ir.rs @@ -267,10 +267,13 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<() pub fn convert_svd(svd: &svd::Device) -> anyhow::Result { let mut ir = IR::new(); - + let cpu = svd.cpu.as_ref().map(|cpu| Cpu { + nvic_priority_bits: cpu.nvic_priority_bits, + }); let mut device = Device { peripherals: vec![], interrupts: vec![], + cpu, }; for p in &svd.peripherals { diff --git a/raltool/src/transform/mod.rs b/raltool/src/transform/mod.rs index 7ff21c685739..96011440b453 100644 --- a/raltool/src/transform/mod.rs +++ b/raltool/src/transform/mod.rs @@ -16,8 +16,8 @@ impl Sanitize { NameKind::Block => *p = sanitize_path(p), NameKind::Fieldset => *p = sanitize_path(p), NameKind::Enum => *p = sanitize_path(p), - NameKind::BlockItem => *p = p.to_sanitized_snake_case().to_string(), - NameKind::Field => *p = p.to_sanitized_snake_case().to_string(), + NameKind::BlockItem => *p = p.to_sanitized_upper_case().to_string(), + NameKind::Field => *p = p.to_sanitized_upper_case().to_string(), NameKind::EnumVariant => *p = p.to_sanitized_upper_case().to_string(), }); Ok(()) diff --git a/raltool/src/util.rs b/raltool/src/util.rs index 5e1995583a68..c0a89a914f9f 100644 --- a/raltool/src/util.rs +++ b/raltool/src/util.rs @@ -272,7 +272,6 @@ pub fn build_rs() -> TokenStream { } } } - /// Return a relative path to access a from b. 
pub fn relative_path(a: &str, b: &str) -> TokenStream { let a: Vec<&str> = a.split("::").collect(); @@ -304,6 +303,15 @@ pub fn relative_path(a: &str, b: &str) -> TokenStream { res } +pub fn absolute_path(path: &str) -> TokenStream { + path.split("::") + .map(|part| Ident::new(part, Span::call_site())) + .fold(quote!(crate), |mut path, ident| { + path.extend(quote!(::#ident)); + path + }) +} + pub fn doc(doc: &Option) -> TokenStream { if let Some(doc) = doc { let doc = doc.replace("\\n", "\n"); From fac30eaa2eed51b131d88937297dbc2747ef2e2d Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Thu, 22 Sep 2022 12:17:06 -0400 Subject: [PATCH 08/15] Generate weak symbols for interrupt handlers --- raltool/src/generate/mod.rs | 36 ++++++++++++++++++++++++++++++++++-- raltool/src/main.rs | 2 ++ 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/raltool/src/generate/mod.rs b/raltool/src/generate/mod.rs index e2374f39717d..658428acf735 100644 --- a/raltool/src/generate/mod.rs +++ b/raltool/src/generate/mod.rs @@ -6,8 +6,9 @@ use anyhow::Result; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; use std::collections::{BTreeMap, HashSet}; -use std::fs; +use std::io::Write; use std::path::{Path, PathBuf}; +use std::{fs, io}; use crate::ir::*; @@ -128,6 +129,7 @@ pub enum CommonModule { pub struct Options { pub module_root: PathBuf, + pub weak_syms: bool, } const BLOCK_MOD: &str = "blocks"; @@ -201,7 +203,9 @@ pub fn render(ir: &IR, opts: &Options) -> Result<()> { block_dev_mod.mark_fs_only(); } - root.render(&opts.module_root) + root.render(&opts.module_root)?; + weak_syms(opts, ir)?; + Ok(()) } fn split_path(s: &str) -> (Vec<&str>, &str) { @@ -209,3 +213,31 @@ fn split_path(s: &str) -> (Vec<&str>, &str) { let n = v.pop().unwrap(); (v, n) } + +/// Generate a linker script of weak symbols for interrupt handlers. 
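+///
+/// Each device gets its own `<device>.x` script beside the generated sources. As a sketch
+/// (the device and interrupt names are hypothetical), an `imxrt1011` device would yield an
+/// `imxrt1011.x` containing one line per interrupt, such as `PROVIDE(LPUART1 = DefaultHandler);`.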
+fn weak_syms(opts: &Options, ir: &IR) -> Result<()> { + if !opts.weak_syms { + return Ok(()); + } + + for (name, device) in &ir.devices { + if name.is_empty() { + continue; + } + + let mut interrupts = device.interrupts.clone(); + interrupts.sort_by_key(|intr| intr.value); + + let mut path = opts.module_root.parent().unwrap().join(name); + path.set_extension("x"); + + let file = fs::File::create(path)?; + let mut file = io::BufWriter::new(file); + + for intr in interrupts { + writeln!(file, "PROVIDE({} = DefaultHandler);", intr.name)?; + } + } + + Ok(()) +} diff --git a/raltool/src/main.rs b/raltool/src/main.rs index 53b9a0c8fd61..cd6c8c6d687a 100755 --- a/raltool/src/main.rs +++ b/raltool/src/main.rs @@ -202,6 +202,7 @@ fn gen(mut args: Generate) -> Result<()> { let generate_opts = generate::Options { module_root: PathBuf::from(args.output_directory).join("lib.rs"), + weak_syms: true, }; let combination = combine::combine(&irs); @@ -337,6 +338,7 @@ fn gen_block(args: GenBlock) -> Result<()> { let generate_opts = generate::Options { module_root: std::path::PathBuf::from(&args.output), + weak_syms: false, }; generate::render(&ir, &generate_opts)?; Ok(()) From 3015485c94915d06e9f4cf751a4e4069dab86b59 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Thu, 22 Sep 2022 13:37:50 -0400 Subject: [PATCH 09/15] Only emit link section for embedded builds --- raltool/src/generate/device.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs index 02c41da28bdc..a39a45715b83 100644 --- a/raltool/src/generate/device.rs +++ b/raltool/src/generate/device.rs @@ -182,7 +182,7 @@ name."# _reserved: u32, } - #[link_section = ".vector_table.interrupts"] + #[cfg_attr(target_os = "none", link_section = ".vector_table.interrupts")] #[no_mangle] pub static __INTERRUPTS: [Vector; #n] = [ #vectors From 92894b273d73997fab0a4adc67d438a564c09bab Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Thu, 22 Sep 2022 11:35:54 -0400 Subject: [PATCH 10/15] Rebrand as raltool --- raltool/Cargo.lock | 36 ++--- raltool/Cargo.toml | 3 +- raltool/LICENSE-MIT | 2 + raltool/README.md | 355 +++++++++++++------------------------------- raltool/src/main.rs | 23 ++- 5 files changed, 138 insertions(+), 281 deletions(-) diff --git a/raltool/Cargo.lock b/raltool/Cargo.lock index 76b8c0f0f73d..58d38336771c 100644 --- a/raltool/Cargo.lock +++ b/raltool/Cargo.lock @@ -46,24 +46,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chiptool" -version = "0.1.0" -dependencies = [ - "anyhow", - "clap", - "env_logger", - "inflections", - "log", - "proc-macro2", - "quote", - "rayon", - "regex", - "serde", - "serde_yaml", - "svd-parser", -] - [[package]] name = "clap" version = "3.1.6" @@ -310,6 +292,24 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "raltool" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "env_logger", + "inflections", + "log", + "proc-macro2", + "quote", + "rayon", + "regex", + "serde", + "serde_yaml", + "svd-parser", +] + [[package]] name = "rayon" version = "1.5.1" diff --git a/raltool/Cargo.toml b/raltool/Cargo.toml index 5854963a6bc4..8dfc00fc1e49 100644 --- a/raltool/Cargo.toml +++ b/raltool/Cargo.toml @@ -1,8 +1,9 @@ [package] -name = "chiptool" +name = "raltool" license = "MIT OR Apache-2.0" version = "0.1.0" edition = "2021" +publish = false [dependencies] clap = { version = "3.1.6", features 
= ["derive"] } diff --git a/raltool/LICENSE-MIT b/raltool/LICENSE-MIT index a43445e6cd1b..bdc2e8383836 100644 --- a/raltool/LICENSE-MIT +++ b/raltool/LICENSE-MIT @@ -1,4 +1,6 @@ Copyright (c) 2016 Jorge Aparicio +Copyright (c) 2021 Embassy project contributors +Copyright (c) 2022 imxrt-rs project contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/raltool/README.md b/raltool/README.md index d8dbf2b741e5..fdf7c58d453b 100644 --- a/raltool/README.md +++ b/raltool/README.md @@ -1,260 +1,117 @@ -# chiptool - -`chiptool` is an experimental fork of `svd2rust` to experiment with: - -- Different API for the generated code. -- Integrating "transforms" in the generation process -- New workflow for storing register definitions in standalone YAML files. - -## Example - -Tested with the RP2040 SVD. Other SVDs might not work quite right yet. - -- svd: https://github.com/Dirbaio/svd2rust/blob/master/svd/rp2040.svd -- yaml: https://github.com/Dirbaio/svd2rust/blob/master/svd/rp2040.yaml -- repo: https://github.com/Dirbaio/rp2040-pac/settings -- docs: https://dirbaio.github.io/rp2040-pac/rp2040_pac/index.html - -## Changes from svd2rust main - -### No owned structs - -Original svd2rust generates an owned struct for each peripheral. This has turned out to have some severe downsides: - -1. there are many cases where the HAL wants to "split up" a peripheral into multiple owned parts. Examples: - - Many pins in a GPIO port peripheral. - - The RX and TX halfs of a UART peripheral. - - Different clocks/PLLs in a clock control peripheral. - - Channels/streams in a DMA controller - - PWM channels - - Virtually all existing HALs run into this issue, and have to unsafely bypass the ownership rules. [nrf gpio](https://github.com/nrf-rs/nrf-hal/blob/6fc5061509d5f3efaa2db15d4af7e3bced4a2e83/nrf-hal-common/src/gpio.rs#L135), [nrf i2c](https://github.com/nrf-rs/nrf-hal/blob/1d6e228f11b7df3847d33d66b01ff772501beb3c/nrf-hal-common/src/twi.rs#L28), [nrf ppi](https://github.com/nrf-rs/nrf-hal/blob/8a28455ab93eb47be4e4edb62ebe96939e1a7ebd/nrf-hal-common/src/ppi/mod.rs#L122), [stm32f4 gpio](https://github.com/stm32-rs/stm32f4xx-hal/blob/9b6aad4b3365a48ae652c315730ab47522e57cfb/src/gpio.rs#L302), [stm32f4 dma](https://github.com/stm32-rs/stm32f4xx-hal/blob/9b6aad4b3365a48ae652c315730ab47522e57cfb/src/dma/mod.rs#L359), [stm32f4 pwm](https://github.com/stm32-rs/stm32f4xx-hal/blob/bb214b6017d84a9c8dd2e8c9fd1f915141e167cc/src/pwm.rs#L228), [atsamd gpio](https://github.com/atsamd-rs/atsamd/blob/4816bb13a12a604e51f929d17b286071a0082c82/hal/src/common/gpio/v2/pin.rs#L669) ... - - Since HALs in practice always bypass the PAC ownership rules and create their own safe abstractions, there's not much advantage in having ownership rules in the PAC in the first place. Not having them makes HAL code cleaner. - -2. sometimes "ownership" is not so clear-cut: - - Multicore. Some peripherals are "core-local", they have an instance per core. Constant address, which instance you access depends on which core you're running on. For example Cortex-M core peripherals, and SIO in RP2040. - - Mutually-exclusive peripherals. In nRF you can only use one of (UART0, SPIM0, SPIS0, TWIM0, TWIS0) at the same time, one of (UART1, SPIM1, SPIS1, TWIM1, TWIS1) at the same time... They're the same peripheral in different "modes". Current nRF PACs get this wrong, allowing you to use e.g. SPIM0 and TWIM0 at the same time, which breaks. -3. 
Ownership in PACs means upgrading the PAC is ALWAYS a breaking change. - - To guarantee you can't get two singletons for the same peripheral, PACs deliberately sabotage building a binary containing two PAC major versions (with this [no\_mangle thing](https://github.com/nrf-rs/nrf-pacs/blob/8f9da05ca1b496bd743f223ed1122dfe9220956c/pacs/nrf52840-pac/src/lib.rs#L2279-L2280)). - - This means the HAL major-bumping the PAC dep version is a breaking change, so the HAL would have to be major-bumped as well. And all PAC bumps are breaking, and they're VERY common... - -### All register access is unsafe - -Reasons: - -- Since there are no owned structs, there can be data races when writing to a register from multiple contexts (eg main thread and interrupt). Ensuring no data races is left to the HALs (HALs are already doing this anyway, see above) -- DMA registers can be turned into arbitrary pointer dereferencing. -- Controls for low-level chip features such as RAM power control or clock control can break safety in interesting ways. - -### Structs representing register values (sets of fields) - -Current svd2rust provides "read proxy" and "write proxy" structs with methods to access register fields when reading/writing. However: - -- There's no type-safe way to save the _value_ of a register in a variable to write later. (there's `.bits()`, but it's not typesafe) -- There's no way to read/modify register fields on a saved value (if using `.bits()`, the user has a raw u32, they need to extract the fields manually with bitwise manipulation) - -Solution: for each register with fields, a "fieldset" struct is generated. This struct wraps the raw `u32` and allows getting/setting individual fields. - -```rust -let val = pac::watchdog::fields::Tick(0); -val.set_cycles(XOSC_MHZ as u16); -val.set_enable(true); -info!("enabled: {:bool}", val.enable()); -``` - -On a register, `.read()` and `.write_value()` can get and set such fieldset values: - -```rust -let val = pac::WATCHDOG.tick().read(); -val.set_enable(false); -// We could save val in a variable somewhere else -// then get it and write it back later -pac::WATCHDOG.tick().write_value(val); -``` - -Closure-based `.write()` and `.modify()` are provided too, like the current svd2rust. - -```rust -pac::WATCHDOG.tick().write(|w| { - w.set_cycles(XOSC_MHZ as u16); - w.set_enable(true); -}); -``` - -### Structs representing enumerated values - -For each EnumeratedValues in a field, a struct is generated. - -This struct is _not_ a Rust enum, it is a struct with associated constants. - -### Possibility to share items (blocks, fieldsets, enums) - -Many peripherals have multiple registers with the same fields (same names, same bit offsets). This tool allows the user to merge them via YAML config. Same for enums and register blocks. - -Fieldsets and enums can be shared across different registers, different register blocks, even different peripherals. - -Example: the RP2040 chip has two GPIO banks: `BANK0` and `QSPI`. These share many enums and field sets. Example of merging some: - -```yaml -- MergeEnums: - from: io_[^:]+::values::Gpio.+Ctrl(.+)over - to: io::values::${1}over -``` - -This merges all `INOVER`, `OUTOVER`, `OEOVER` and `IRQOVER` enums (144 enums!) into just 4. - -- huge reduction in generated code, mitigating long compile times which is one of the top complaints of current PACs. -- Better code sharing in HALs since they can use a single enum/fieldset to read/write to multiple registers. 
- -### Automatic cluster creation - -```yaml -- MakeBlock: - block: pio0::Pio0 - from: sm(\d+)_(.+) - to_outer: sm$1 - to_inner: $2 - to_block: pio0::StateMachine -``` - -This collapses all `smX_*` registers into a single cluster: - - // before: - RegisterBlock: - sm0_clkdiv - sm0_execctrl - sm0_shiftctrl - sm0_addr - sm0_instr - sm0_pinctrl - sm1_clkdiv - sm1_execctrl - sm1_shiftctrl - sm1_addr - sm1_instr - sm1_pinctrl - sm2_clkdiv - sm2_execctrl - sm2_shiftctrl - sm2_addr - sm2_instr - sm2_pinctrl - sm3_clkdiv - sm3_execctrl - sm3_shiftctrl - sm3_addr - sm3_instr - sm3_pinctrl - - // after: - RegisterBlock: - sm0 - sm1 - sm2 - sm3 - - StateMachine block: - clkdiv - execctrl - shiftctrl - addr - instr - pinctrl - -### Automatic array creation - -example: - -```yaml -- MakeRegisterArray: - block: pio0::Pio0 - from: sm\d+ - to: sm -``` - - // before: - RegisterBlock: - sm0 - sm1 - sm2 - sm3 - - // after: - RegisterBlock: - sm (array of length 4) - -### RegisterBlocks and Registers wrap pointers - -```rust -// a RegisterBlock -pub struct Resets(*mut u8); - -impl Resets { - // A register access function. This is just pointer arithmetic - pub fn reset_done(self) -> Reg { - unsafe { Reg::new(self.0.add(8usize))) } - } -} - -// the Reg struct -pub struct Reg { - ptr: *mut u8, - ... -} +raltool +======= + +`raltool` is a fork of [`chiptool`][], which is itself a fork of [`svd2rust`][]. +The register access layer (RAL) generated by `raltool` strives to compile faster +than anything generated by `chiptool` or `svd2rust`, supporting the needs of +Rust projects that work with very large SVDs. + +`raltool` is an experiment to support the [`imxrt-ral`][] project. The +tool's interface and features do not vary much from [`chiptool`][], so see +that project's documentation for more information. However, the +`raltool`-generated code is different than both `chiptool`- and +`svd2rust`-generated code. For more information on the `raltool`-generated API, +see the [`imxrt-ral`][] project. + + [`chiptool`]: https://github.com/embassy-rs/chiptool + [`svd2rust`]: https://github.com/rust-embedded/svd2rust + [`imxrt-ral`]: https://github.com/imxrt-rs/imxrt-ral + +Benchmarks +---------- + +`svd2rust`, `chiptool`, and `raltool` ingested a patched SVD for an i.MX RT 1062 +MCU and generated a Rust crate. The table below shows the `cargo build` build +times for each crate. Each build succeeded without warnings. + +| Codegen tool | Build time | +|:------------------------|:-----------| +| `svd2rust` | 1m 30s | +| `chiptool` 1 | 47s | +| **`raltool`** | **9s** | + +1 Lightly modified the generated crate to suppress warnings. + +`svd2rust` version 0.24.1. `chiptool` built at revision `73b33d9`. rustc 1.63.0. + +`chiptool` enhancements +----------------------- + + ┌───────┐ IR_A ┌───────────┐ IR_A' ┏━━━━━━━━━┓ + SVD_A -> │ Parse │ ---> │ Transform │ ----> ┃ ┃ + └───────┘ └───────────┘ ┃ ┃ + ┌───────┐ IR_B ┌───────────┐ IR_B' ┃ ┃ IR ┌─────────┐ + SVD_B -> │ Parse │ ---> │ Transform │ ----> ┃ Combine ┃ -> │ Codegen │ -> RAL Crate + └───────┘ └───────────┘ ┃ ┃ └─────────┘ + ┌───────┐ IR_C ┌───────────┐ IR_C' ┃ ┃ + SVD_C -> │ Parse │ ---> │ Transform │ ----> ┃ ┃ + └───────┘ └───────────┘ ┗━━━━━━━━━┛ + +`raltool` changes more than just the generated code. `raltool` accepts multiple +SVD files, and introduces a new "combine" phase to the process. The +combine phase consolidates blocks, fieldsets, and enums *across* devices. 
The +combine pass runs after the transform pass(es), consuming one or more `chiptool` +intermediate representations (IR) to create a new IR. This single, combined IR +represents all the peripherals, blocks, fieldsets, and enums for all devices. +The codegen phase generates modules to represent that combined IR. + +In practice, the combine phase can automatically reduce `N` UART peripherals +from `N` SVDs down to 1 UART peripheral. This works even if the input SVDs have +different names for blocks, fieldsets, or enums. + +The combine phase is conservative, and it won't combine device elements if +they don't seem equivalent. If you know that blocks, fieldsets, or enums +should be equivalent, you can use transforms to coax the IR into a combine-able +form, or you can patch your SVD(s). + +Limitations +----------- + +Aliased registers are not supported. To control codegen, use a transform to +remove aliased registers / fields. The codegen phase will select an arbitrary +register alias for codegen, though it prefers an alias that's both +readable and writable. + +Aliased cluster arrays are also not supported. The recommendation is to either +remove the peripherals, or describe the peripherals differently, using +`chiptool` YAML. Surprisingly, aliased cluster arrays appear in practice; see +the i.MX RT 11xx SVDs. + +The transform phase(s) run before the combine phase, not after. It's +trivial to introduce a transform phase on the combined IR, but there +hasn't been a need for this. + +`raltool` simply generates a tree of Rust source files. It does not generate a +`Cargo.toml` to define the package. You're responsible for defining this +manifest, adding all dependencies, and defining the features expected in the +generated code. `cargo init` and `cargo add` can help with this: + +``` bash +cd path/to/output + +cargo init +cargo add cortex-m ral-registers + +# Edit Cargo.toml, add feature flags for devices. +# See lib.rs for expected features. ``` -- No need to calculate and fill padding holes in RegisterBlock structs -- No problem if registers overlap (currently svd2rust has to check for this, and falls back to a function-based codegen similar to this one) -- Pointer provenance is not erased. Previous codegen causes pointers to become references (&), so it's undefined behavior to do arithmetic with a register pointer to write somewhere else. This is useful in a few niche situations: - - calculating a pointer to a particular register bit in the bitbanding region - - The RP2040 chip has register aliases that atomically set/clear/xor register bits at addr + 0x1000/0x2000/0x3000 - -This generates the same assembly code as original svd2rust when optimizations are enabled. - -## Running - - mkdir -p out - mkdir -p out/src - cargo run -- -i svd/rp2040.svd -c svd/rp2040.yaml - rustfmt out/src/lib.rs - (cd out; cargo build && cargo doc) - -## To-Do - -Missing features: - -- Clusters in input SVD file -- registers with bit width other than 32 - -Nice to have features: - -- More transforms (deletes, renames, move entire module...) -- clean up doc comments better - -## License +License +------- Licensed under either of -- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or +- Apache License, Version 2.0 ([LICENSE-APACHE][] or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- MIT license ([LICENSE-MIT][] or http://opensource.org/licenses/MIT) at your option.
-### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the -work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any -additional terms or conditions. - -## Code of Conduct + [LICENSE-APACHE]: LICENSE-APACHE + [LICENSE-MIT]: LICENSE-MIT -Contribution to this crate is organized under the terms of the [Rust Code of -Conduct][coc], the maintainer of this crate, the [Tools team][team], promises -to intervene to uphold that code of conduct. +**Contribution** -[coc]: CODE_OF_CONDUCT.md -[team]: https://github.com/rust-embedded/wg#the-tools-team +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/raltool/src/main.rs b/raltool/src/main.rs index cd6c8c6d687a..af5230ef63f2 100755 --- a/raltool/src/main.rs +++ b/raltool/src/main.rs @@ -1,9 +1,9 @@ #![recursion_limit = "128"] use anyhow::{bail, Context, Result}; -use chiptool::{combine, generate, svd2ir}; use clap::Parser; use log::*; +use raltool::{combine, generate, svd2ir}; use rayon::prelude::*; use regex::Regex; use std::fs; @@ -11,10 +11,9 @@ use std::io::Read; use std::path::PathBuf; use std::{fs::File, io::stdout}; -use chiptool::ir::IR; +use raltool::ir::IR; #[derive(Parser)] -#[clap(version = "1.0", author = "Dirbaio ")] struct Opts { #[clap(subcommand)] subcommand: Subcommand, @@ -154,11 +153,11 @@ fn extract_peripheral(args: ExtractPeripheral) -> Result<()> { .expect("derivedFrom peripheral not found"); } - chiptool::svd2ir::convert_peripheral(&mut ir, p)?; + raltool::svd2ir::convert_peripheral(&mut ir, p)?; // Fix weird newline spam in descriptions. let re = Regex::new("[ \n]+").unwrap(); - chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; + raltool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; for t in &config.transforms { info!("running: {:?}", t); @@ -166,7 +165,7 @@ fn extract_peripheral(args: ExtractPeripheral) -> Result<()> { } // Ensure consistent sort order in the YAML. - chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + raltool::transform::sort::Sort {}.run(&mut ir).unwrap(); serde_yaml::to_writer(stdout(), &ir).unwrap(); Ok(()) @@ -189,9 +188,7 @@ fn gen(mut args: Generate) -> Result<()> { let svd = load_svd(&svd)?; let mut ir = svd2ir::convert_svd(&svd)?; - chiptool::transform::map_descriptions(&mut ir, |d| { - re.replace_all(d, " ").into_owned() - })?; + raltool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; for t in &config.transforms { t.run(&mut ir)?; @@ -231,7 +228,7 @@ fn fmt(args: Fmt) -> Result<()> { let mut ir: IR = serde_yaml::from_slice(&got_data)?; // Ensure consistent sort order in the YAML. - chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + raltool::transform::sort::Sort {}.run(&mut ir).unwrap(); // Trim all descriptions @@ -331,10 +328,10 @@ fn gen_block(args: GenBlock) -> Result<()> { let data = fs::read(&args.input)?; let mut ir: IR = serde_yaml::from_slice(&data)?; - chiptool::transform::Sanitize {}.run(&mut ir).unwrap(); + raltool::transform::Sanitize {}.run(&mut ir).unwrap(); // Ensure consistent sort order in the YAML. 
- chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); + raltool::transform::sort::Sort {}.run(&mut ir).unwrap(); let generate_opts = generate::Options { module_root: std::path::PathBuf::from(&args.output), @@ -345,7 +342,7 @@ fn gen_block(args: GenBlock) -> Result<()> { } #[derive(serde::Serialize, serde::Deserialize)] struct Config { - transforms: Vec, + transforms: Vec, } impl Default for Config { From c434ec452d96f73a181c265c5264d7f8ec1db696 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sun, 25 Sep 2022 10:24:37 -0400 Subject: [PATCH 11/15] Address clippy warnings in ir, main --- raltool/src/ir.rs | 44 +++++++++++++++++++++++++------------------- raltool/src/main.rs | 16 +++++----------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/raltool/src/ir.rs b/raltool/src/ir.rs index 8dff903a2d4b..4f86dbce179b 100644 --- a/raltool/src/ir.rs +++ b/raltool/src/ir.rs @@ -3,7 +3,7 @@ use serde::{de, de::Visitor, ser::SerializeMap, Deserialize, Deserializer, Seria use std::collections::{BTreeMap, HashMap}; use std::fmt; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq)] pub struct IR { pub devices: HashMap, pub blocks: HashMap, @@ -29,12 +29,18 @@ impl IR { } } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +impl Default for IR { + fn default() -> Self { + Self::new() + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Cpu { pub nvic_priority_bits: u32, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Device { pub peripherals: Vec, pub interrupts: Vec, @@ -42,7 +48,7 @@ pub struct Device { pub cpu: Option, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Peripheral { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -62,7 +68,7 @@ pub struct Peripheral { pub interrupts: HashMap, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Interrupt { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -70,7 +76,7 @@ pub struct Interrupt { pub value: u32, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Block { #[serde(default, skip_serializing_if = "Option::is_none")] pub extends: Option, @@ -80,7 +86,7 @@ pub struct Block { pub items: Vec, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BlockItem { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -94,35 +100,35 @@ pub struct BlockItem { pub inner: BlockItemInner, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub enum BlockItemInner { Block(BlockItemBlock), Register(Register), } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub enum Array { Regular(RegularArray), Cursed(CursedArray), } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RegularArray { pub len: u32, pub stride: u32, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, 
PartialEq, Eq, Serialize, Deserialize)] pub struct CursedArray { pub offsets: Vec, } -#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BitSize(pub u32); -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Register { #[serde(default = "default_readwrite", skip_serializing_if = "is_readwrite")] pub access: Access, @@ -130,7 +136,7 @@ pub struct Register { #[serde(default, skip_serializing_if = "Option::is_none")] pub fieldset: Option, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BlockItemBlock { pub block: String, } @@ -142,7 +148,7 @@ pub enum Access { Write, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct FieldSet { #[serde(default, skip_serializing_if = "Option::is_none")] pub extends: Option, @@ -153,7 +159,7 @@ pub struct FieldSet { pub fields: Vec, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Field { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -171,7 +177,7 @@ pub struct Field { pub enum_readwrite: Option, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Enum { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option, @@ -179,7 +185,7 @@ pub struct Enum { pub variants: Vec, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct EnumVariant { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -266,7 +272,7 @@ impl<'de> Visitor<'de> for IRVisitor { // into our map. while let Some(key) = access.next_key()? 
{ let key: String = key; - let (kind, name) = key.split_once("/").ok_or(de::Error::custom("item names must be in form `kind/name`, where kind is `block`, `fieldset` or `enum`"))?; + let (kind, name) = key.split_once('/').ok_or_else(|| de::Error::custom("item names must be in form `kind/name`, where kind is `block`, `fieldset` or `enum`"))?; match kind { "block" => { let val: Block = access.next_value()?; diff --git a/raltool/src/main.rs b/raltool/src/main.rs index af5230ef63f2..d2a5839b9e0b 100755 --- a/raltool/src/main.rs +++ b/raltool/src/main.rs @@ -185,7 +185,7 @@ fn gen(mut args: Generate) -> Result<()> { .svds .par_iter() .map(|svd| -> Result { - let svd = load_svd(&svd)?; + let svd = load_svd(svd)?; let mut ir = svd2ir::convert_svd(&svd)?; raltool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; @@ -238,21 +238,21 @@ fn fmt(args: Fmt) -> Result<()> { } }; - for (_, b) in &mut ir.blocks { + for b in ir.blocks.values_mut() { cleanup(&mut b.description); for i in &mut b.items { cleanup(&mut i.description); } } - for (_, b) in &mut ir.fieldsets { + for b in ir.fieldsets.values_mut() { cleanup(&mut b.description); for i in &mut b.fields { cleanup(&mut i.description); } } - for (_, b) in &mut ir.enums { + for b in ir.enums.values_mut() { cleanup(&mut b.description); for i in &mut b.variants { cleanup(&mut i.description); @@ -340,17 +340,11 @@ fn gen_block(args: GenBlock) -> Result<()> { generate::render(&ir, &generate_opts)?; Ok(()) } -#[derive(serde::Serialize, serde::Deserialize)] +#[derive(serde::Serialize, serde::Deserialize, Default)] struct Config { transforms: Vec, } -impl Default for Config { - fn default() -> Self { - Self { transforms: vec![] } - } -} - // ============== struct Pairs { From d6557da25bafacad4680d15e0abfc8d368f36fec Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sun, 25 Sep 2022 11:12:29 -0400 Subject: [PATCH 12/15] Remove CI definitions, tool configs I plan to subtree this project directly into imxrt-ral. That project's CI will cover this build. 
--- raltool/.github/bors.toml | 4 ---- raltool/.github/workflows/ci.yaml | 20 -------------------- raltool/.vscode/settings.json | 8 -------- 3 files changed, 32 deletions(-) delete mode 100644 raltool/.github/bors.toml delete mode 100644 raltool/.github/workflows/ci.yaml delete mode 100644 raltool/.vscode/settings.json diff --git a/raltool/.github/bors.toml b/raltool/.github/bors.toml deleted file mode 100644 index 27f77ded18f5..000000000000 --- a/raltool/.github/bors.toml +++ /dev/null @@ -1,4 +0,0 @@ -status = [ - "build", -] -delete_merged_branches = true diff --git a/raltool/.github/workflows/ci.yaml b/raltool/.github/workflows/ci.yaml deleted file mode 100644 index 870ec9be64bc..000000000000 --- a/raltool/.github/workflows/ci.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: ci -on: - push: - -env: - CARGO_TERM_COLOR: always - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Cache Dependencies - uses: Swatinem/rust-cache@v1.3.0 - - - name: Check - run: | - cargo check - \ No newline at end of file diff --git a/raltool/.vscode/settings.json b/raltool/.vscode/settings.json deleted file mode 100644 index 054f9f3bda87..000000000000 --- a/raltool/.vscode/settings.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "rust-analyzer.cargo.runBuildScripts": true, - "rust-analyzer.procMacro.enable": true, - "rust-analyzer.experimental.procAttrMacros": false, - "rust-analyzer.assist.importGranularity": "module", - "rust-analyzer.assist.importEnforceGranularity": true, - "editor.formatOnSave": true -} \ No newline at end of file From f2273024aafa98f4912faed0a6d7ac95afd9cf76 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sat, 8 Oct 2022 15:21:30 -0400 Subject: [PATCH 13/15] Add number functions for ptr -> N runtime query If you discard type information, it's nice to have a way to re-acquire the N associated with a peripheral. This commit introduces the `number` functions for that purpose. This can let users create thin drivers just around a register block static reference, and only acquire the number at runtime. Document this new function as an advanced usage, along with one other (unrelated) usage pattern. --- raltool/src/generate/device.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs index a39a45715b83..e4365ffaa508 100644 --- a/raltool/src/generate/device.rs +++ b/raltool/src/generate/device.rs @@ -92,12 +92,14 @@ pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result 1 && periphs .iter() .all(|periph| num_endings.is_match(&periph.name)) { let mut instances = TokenStream::new(); + let mut const_to_num: Vec = Vec::new(); for peripheral in periphs.iter() { let name = Ident::new(&peripheral.name, span); let num = num_endings.captures(&peripheral.name).unwrap(); @@ -106,7 +108,7 @@ pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result; impl crate::private::Sealed for #name {} @@ -119,6 +121,14 @@ pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result Option { + [#(#const_to_num)*].into_iter() + .find(|(ptr, _)| core::ptr::eq(rb, *ptr)) + .map(|(_, inst)| inst) + } + }; instances } else { assert!( @@ -131,6 +141,12 @@ name."# ); let peripheral = periphs.first().unwrap(); let name = Ident::new(&peripheral.name, span); + number_fn = quote! { + /// Returns the instance number `N` for a peripheral instance. + pub fn number(rb: *const RegisterBlock) -> Option { + core::ptr::eq(rb, #name).then_some(0) + } + }; quote! 
{ pub type #name = Instance<{crate::SOLE_INSTANCE}>; impl crate::private::Sealed for #name {} @@ -152,6 +168,7 @@ name."# pub type Instance = crate::Instance; #instances + #number_fn } }) } From 8b7721ded89013d278c1c510b4f1ea1c39a910f7 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sat, 8 Oct 2022 16:22:33 -0400 Subject: [PATCH 14/15] Add additional API docs, function attributes --- raltool/src/generate/device.rs | 12 ++++++ raltool/src/generate/mod.rs | 71 ++++++++++++++++++++++++++++++++-- 2 files changed, 79 insertions(+), 4 deletions(-) diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs index e4365ffaa508..9a5784d26b1a 100644 --- a/raltool/src/generate/device.rs +++ b/raltool/src/generate/device.rs @@ -115,6 +115,12 @@ pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result Self { Instance::new(#name) } @@ -152,6 +158,12 @@ name."# impl crate::private::Sealed for #name {} impl crate::Valid for #name {} impl #name { + /// Acquire a vaild, but possibly aliased, instance. + /// + /// # Safety + /// + /// See [the struct-level safety documentation](crate::Instance). + #[inline] pub const unsafe fn instance() -> Self { Instance::new(#name) } diff --git a/raltool/src/generate/mod.rs b/raltool/src/generate/mod.rs index 658428acf735..c029c8e49ff2 100644 --- a/raltool/src/generate/mod.rs +++ b/raltool/src/generate/mod.rs @@ -141,33 +141,96 @@ pub fn render(ir: &IR, opts: &Options) -> Result<()> { root.items.extend(quote!( #![no_std] - #![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] + #![allow(non_camel_case_types, non_snake_case, non_upper_case_globals, clippy::self_named_constructors, clippy::module_inception)] pub use ral_registers::{RWRegister, RORegister, WORegister, read_reg, write_reg, modify_reg}; + /// An owned peripheral of type `T`, instance `N`. + /// + /// Fabricating an `Instance` is always `unsafe`. An owner of an + /// `Instance` may assume that + /// + /// - the underlying pointer points to a static register block of type `T`. + /// - the instance number `N` properly describes the peripheral instance. + /// - they own _all_ registers pointed at by `T`. + /// + /// Owners use this guarantee to safely access the peripheral registers. + /// However, nothing guarantees any of these except for your diligence. + /// + /// Constructing an `Instance` is zero cost. Additionally, `Instance` is transparent + /// and amenable to null-pointer optimizations. + /// + /// See the package-level documentation for more information on fabricating + /// instances. + /// + /// # Safety of `new()`. + /// + /// By calling `new()`, you claim + /// + /// 1. `ptr` points to static memory that can be described by a type `T`. + /// 2. The instance number `N` correctly describes `ptr`. + /// 3. You are becoming the sole owner of this instance. + /// + /// # Safety of `instance()` + /// + /// The various `instance()` methods handle safety concerns 1 and 2 from `new()`. + /// By their construction, each `instance()` implementation provides a pointer to valid + /// peripheral memory, and associates the correct `N` with that pointer. Therefore, + /// you're only responsible for ensuring safety concern 3 from `new()`. + #[repr(transparent)] pub struct Instance { - ptr: *const T, + ptr: core::ptr::NonNull, } impl core::ops::Deref for Instance { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*self.ptr } + // Safety: User provided a pointer that points to static MMIO. 
+ // This implies non-null, initialized, aligned, and dereferenceable. + unsafe { self.ptr.as_ref() } } } impl Instance { + /// Create an arbitrary `Instance` from a pointer to `T`. + /// + /// # Safety + /// + /// See [the struct docs](Instance) for the safety contract. + #[inline] pub const unsafe fn new(ptr: *const T) -> Self { - Self { ptr } + // Casting *const _ to *mut _ is OK. The mutable pointer never + // escapes Instance. + Self { ptr: core::ptr::NonNull::new_unchecked(ptr as *mut _) } } } unsafe impl Send for Instance {} + /// The instance number for a peripheral singleton. + /// + /// If your peripheral only has one instance, it's given + /// this number. The CCM peripheral is a good example of + /// a peripheral that uses this constant. + /// + /// See the package documentation for more information on + /// this constant. pub const SOLE_INSTANCE: u8 = 0u8; mod private { pub trait Sealed {} } + + /// Vouches for an `Instance`'s validity. + /// + /// This trait is implemented for all `Instance` supported + /// by your chip. Note that the implementation may change when + /// selecting new chip features. For instance, i.MX RT 1011 chips + /// do not have LPUART 4 through 8. So, `Valid` is _not_ implemented + /// for `lpuart::Instance<4>` through `lpuart::Instance<8>`. + /// + /// See the package documentation for more information on how + /// to use this trait in your APIs. pub trait Valid : private::Sealed {} )); From 0b97844b896356e26960138da8324808faee6bd9 Mon Sep 17 00:00:00 2001 From: Ian McIntyre Date: Sat, 8 Oct 2022 19:22:25 -0400 Subject: [PATCH 15/15] Emit RTIC-compatible Peripheral structs Hide the `steal()` method; it's only available for RTIC. Users should call their own `instance()` methods. --- raltool/src/generate/device.rs | 53 ++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/raltool/src/generate/device.rs b/raltool/src/generate/device.rs index 9a5784d26b1a..d8c5f1979b99 100644 --- a/raltool/src/generate/device.rs +++ b/raltool/src/generate/device.rs @@ -68,7 +68,7 @@ pub fn render(_opts: &super::Options, _ir: &IR, d: &Device) -> Result Self { + Self { + #member_inits + } + } + } + }); + Ok(out) }
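Downstream code can combine the per-peripheral `instance()` constructors with the new `number()` query to build a thin driver around nothing more than a register block pointer, re-acquiring `N` only when it is needed. The sketch below is illustrative only: the crate name `imxrt_ral`, the `lpuart` module, and the `LPUART2` alias are assumptions (they follow the imxrt-ral naming convention) and are not defined by these patches.

```rust
// Sketch only. Assumes a generated crate `imxrt_ral` with an `lpuart` module
// exposing `RegisterBlock`, `Instance<N>`, the `LPUART2` alias, `instance()`,
// and the `number()` function added in this patch.
use imxrt_ral::lpuart;

/// A thin driver that keeps only a register block pointer and re-acquires
/// the instance number at runtime.
struct Uart {
    rb: *const lpuart::RegisterBlock,
}

impl Uart {
    /// Which LPUART is this? `number()` compares the pointer against each
    /// instance's known address and returns the matching `N`.
    fn n(&self) -> Option<u8> {
        lpuart::number(self.rb)
    }
}

fn demo() {
    // Safety: this sketch claims sole ownership of LPUART2.
    let lpuart2 = unsafe { lpuart::LPUART2::instance() };
    // Deref to the register block, then keep just the pointer.
    let uart = Uart { rb: &*lpuart2 };
    assert_eq!(uart.n(), Some(2));
}
```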
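The `Valid` trait documented above is intended as a bound in downstream driver APIs, so that a driver only accepts instances that exist on the selected chip. A minimal sketch under the same assumed `imxrt_ral::lpuart` layout as the previous example:

```rust
// Sketch only; `imxrt_ral`, `lpuart`, and `UartDriver` are assumed names.
use imxrt_ral::{lpuart, Valid};

/// A driver generic over the LPUART instance number.
struct UartDriver<const N: u8> {
    lpuart: lpuart::Instance<N>,
}

impl<const N: u8> UartDriver<N> {
    /// The `Valid` bound rejects instances the chosen chip does not provide
    /// (for example, LPUART4 through LPUART8 on an i.MX RT 1011).
    fn new(lpuart: lpuart::Instance<N>) -> Self
    where
        lpuart::Instance<N>: Valid,
    {
        Self { lpuart }
    }
}
```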
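The RTIC-compatible struct emitted by the last patch is consumed by the framework rather than by user code, which is why `steal()` is hidden. A hypothetical RTIC 1.x application, assuming the struct is exported as `Peripherals` at the crate root (what RTIC's `peripherals = true` option expects) and that the generated crate is named `imxrt_ral`:

```rust
// Hypothetical RTIC 1.x fragment; the `imxrt_ral` device path and the
// `Peripherals` name are assumptions, not confirmed by these patches.
// RTIC calls the hidden `steal()` itself when `peripherals = true`.
#[rtic::app(device = imxrt_ral, peripherals = true)]
mod app {
    #[shared]
    struct Shared {}

    #[local]
    struct Local {}

    #[init]
    fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
        // `cx.device` is the generated peripherals struct. Outside of RTIC,
        // fabricate peripherals with the per-instance `instance()` methods.
        let _device = cx.device;
        (Shared {}, Local {}, init::Monotonics())
    }
}
```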