diff --git a/.github/workflows/dockerimage.yml b/.github/workflows/dockerimage.yml index 2486cae18..8090dc750 100644 --- a/.github/workflows/dockerimage.yml +++ b/.github/workflows/dockerimage.yml @@ -95,18 +95,6 @@ jobs: tags: ${{ secrets.DOCKERHUB_ORGNAME }}/${{ matrix.bin }}:${{ steps.docker_tag.outputs.value }} - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} - - name: Build and push image with evm - id: docker_build_evm - uses: docker/build-push-action@v2 - with: - context: . - file: Dockerfile.evm - push: true - platforms: linux/amd64 - build-args: | - BIN=${{ matrix.bin }} - PROFILE=release - tags: ${{ secrets.DOCKERHUB_ORGNAME }}/${{ matrix.bin }}:${{ steps.docker_tag.outputs.value }}_evm - name: slack uses: 8398a7/action-slack@v3 env: diff --git a/Cargo.lock b/Cargo.lock index 8736f5eaa..06bfc2970 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3541,6 +3541,8 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-timestamp", "cumulus-primitives-utility", + "fp-rpc", + "fp-self-contained", "frame-benchmarking", "frame-executive", "frame-support", @@ -3562,6 +3564,7 @@ dependencies = [ "pallet-aura", "pallet-authorship", "pallet-balances", + "pallet-base-fee", "pallet-bridge", "pallet-collator-selection", "pallet-collective", @@ -3569,6 +3572,18 @@ dependencies = [ "pallet-currency-adapter", "pallet-democracy", "pallet-emergency-shutdown", + "pallet-ethereum", + "pallet-evm", + "pallet-evm-precompile-assets-erc20", + "pallet-evm-precompile-balances-erc20", + "pallet-evm-precompile-blake2", + "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dispatch", + "pallet-evm-precompile-ed25519", + "pallet-evm-precompile-modexp", + "pallet-evm-precompile-sha3fips", + "pallet-evm-precompile-simple", + "pallet-evm-signatures", "pallet-farming", "pallet-identity", "pallet-liquid-staking", @@ -7610,6 +7625,8 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-timestamp", "cumulus-primitives-utility", + "fp-rpc", + "fp-self-contained", "frame-benchmarking", "frame-executive", "frame-support", @@ -7631,6 +7648,7 @@ dependencies = [ "pallet-aura", "pallet-authorship", "pallet-balances", + "pallet-base-fee", "pallet-bridge", "pallet-collator-selection", "pallet-collective", @@ -7638,6 +7656,18 @@ dependencies = [ "pallet-currency-adapter", "pallet-democracy", "pallet-emergency-shutdown", + "pallet-ethereum", + "pallet-evm", + "pallet-evm-precompile-assets-erc20", + "pallet-evm-precompile-balances-erc20", + "pallet-evm-precompile-blake2", + "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dispatch", + "pallet-evm-precompile-ed25519", + "pallet-evm-precompile-modexp", + "pallet-evm-precompile-sha3fips", + "pallet-evm-precompile-simple", + "pallet-evm-signatures", "pallet-farming", "pallet-identity", "pallet-liquid-staking", diff --git a/Dockerfile.evm b/Dockerfile.evm deleted file mode 100644 index ba965a080..000000000 --- a/Dockerfile.evm +++ /dev/null @@ -1,45 +0,0 @@ -FROM docker.io/paritytech/ci-linux:production as builder -LABEL description="This is the build stage for Parallel. Here we create the binary." - -ARG PROFILE=release -ARG BIN=parallel - -WORKDIR /parallel - -COPY . 
/parallel - -RUN rustup default nightly - -RUN cargo build --workspace --exclude runtime-integration-tests --profile $PROFILE --bin $BIN --features with-evm-runtime --features runtime-benchmarks --features try-runtime - -# ===== SECOND STAGE ====== - -FROM docker.io/library/ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive -LABEL description="This is the 2nd stage: a very small image where we copy the Parallel binary." - -ARG PROFILE=release -ARG BIN=parallel - -ENV BIN_PATH=/usr/local/bin/$BIN - -COPY --from=builder /parallel/target/$PROFILE/$BIN /usr/local/bin - -RUN apt update -y \ - && apt install -y ca-certificates libssl-dev tzdata \ - && useradd -m -u 1000 -U -s /bin/sh -d /parallel parallel \ - && mkdir -p /parallel/.local \ - && mkdir /data \ - && chown -R parallel:parallel /data \ - && ln -s /data /parallel/.local/share \ - && chown -R parallel:parallel /parallel/.local/share - -USER parallel -WORKDIR /parallel -EXPOSE 30333 9933 9944 29933 -VOLUME ["/data"] - -RUN echo '#!/bin/bash\n$BIN_PATH $@' > .entrypoint.sh -RUN chmod u+x .entrypoint.sh - -ENTRYPOINT ["/parallel/.entrypoint.sh"] diff --git a/LICENSE b/LICENSE index e6e77b089..9cecc1d46 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,674 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    {one line to give the program's name and a brief idea of what it does.}
+    Copyright (C) {year}  {name of author}
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    {project}  Copyright (C) {year}  {fullname}
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Makefile b/Makefile index 99b6cd84e..08248baa3 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ SURI := //Alice LAUNCH_CONFIG_YAML := config.yml LAUNCH_CONFIG_JSON := config.json DOCKER_OVERRIDE_YAML := docker-compose.override.yml -DOCKER_TAG := latest_evm +DOCKER_TAG := latest RELAY_DOCKER_TAG := v0.9.28 CUMULUS_DOCKER_TAG := v0.9.28 @@ -37,10 +37,6 @@ build: build-release: cargo build --locked --workspace --exclude runtime-integration-tests --bin parallel --release --features runtime-benchmarks --features try-runtime -.PHONY: build-release-with-evm -build-release-with-evm: - cargo build --locked --workspace --exclude runtime-integration-tests --bin parallel --release --features with-evm-runtime --features runtime-benchmarks --features try-runtime - .PHONY: build-compact-release build-compact-release: cargo build --locked --workspace --exclude runtime-integration-tests --bin parallel --release @@ -55,26 +51,16 @@ build-release-if-not-exists: make build-release; \ fi -.PHONY: build-evm-release-if-not-exists -build-evm-release-if-not-exists: - if [ ! -f ./target/release/parallel ]; then \ - make build-release-with-evm; \ - fi - .PHONY: clean clean: cargo clean -p parallel -p vanilla-runtime -p kerria-runtime -p heiko-runtime -p parallel-runtime .PHONY: ci -ci: check check-with-evm lint check-helper check-wasm test integration-test +ci: check lint check-helper check-wasm test integration-test .PHONY: check check: - SKIP_WASM_BUILD= cargo check --all-targets --features runtime-benchmarks --features try-runtime - -.PHONY: check-with-evm -check-with-evm: - SKIP_WASM_BUILD= cargo check --all-targets --features with-evm-runtime --features runtime-benchmarks --features try-runtime --features testing + SKIP_WASM_BUILD= cargo check --all-targets --features runtime-benchmarks --features try-runtime --features testing .PHONY: check-wasm check-wasm: @@ -88,10 +74,6 @@ check-helper: test: SKIP_WASM_BUILD= cargo test --workspace --features runtime-benchmarks --exclude runtime-integration-tests --exclude parallel --exclude parallel-runtime --exclude vanilla-runtime --exclude kerria-runtime --exclude heiko-runtime --exclude pallet-loans-rpc --exclude pallet-loans-rpc-runtime-api --exclude parallel-primitives -- --nocapture -.PHONY: test-with-evm -test-with-evm: - SKIP_WASM_BUILD= cargo test --workspace --features with-evm-runtime --features runtime-benchmarks --exclude runtime-integration-tests --exclude pallet-loans-rpc --exclude pallet-loans-rpc-runtime-api --exclude parallel-primitives -- --nocapture - .PHONY: test-loans test-loans: SKIP_WASM_BUILD= cargo test -p pallet-loans --lib --no-fail-fast -- --nocapture @@ -137,43 +119,43 @@ bench:build-release-if-not-exists ./scripts/benchmark.sh .PHONY: bench-farming -bench-farming: build-evm-release-if-not-exists +bench-farming: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-farming --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/farming/src/weights.rs .PHONY: bench-loans -bench-loans: build-evm-release-if-not-exists +bench-loans: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-loans --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/loans/src/weights.rs .PHONY: bench-crowdloans -bench-crowdloans: 
build-evm-release-if-not-exists +bench-crowdloans: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-crowdloans --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/crowdloans/src/weights.rs .PHONY: bench-bridge -bench-bridge: build-evm-release-if-not-exists +bench-bridge: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-bridge --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/bridge/src/weights.rs .PHONY: bench-xcm-helper -bench-xcm-helper: build-evm-release-if-not-exists +bench-xcm-helper: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-xcm-helper --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/xcm-helper/src/weights.rs .PHONY: bench-amm -bench-amm: build-evm-release-if-not-exists +bench-amm: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-amm --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/amm/src/weights.rs .PHONY: bench-liquid-staking -bench-liquid-staking: build-evm-release-if-not-exists +bench-liquid-staking: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-liquid-staking --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/liquid-staking/src/weights.rs .PHONY: bench-amm-router -bench-amm-router: build-evm-release-if-not-exists +bench-amm-router: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-router --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/router/src/weights.rs .PHONY: bench-streaming -bench-streaming: build-evm-release-if-not-exists +bench-streaming: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-streaming --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/streaming/src/weights.rs .PHONY: bench-asset-registry -bench-asset-registry: build-evm-release-if-not-exists +bench-asset-registry: build-release-if-not-exists ./target/release/parallel benchmark pallet --chain=$(CHAIN) --execution=wasm --wasm-execution=compiled --pallet=pallet-asset-registry --extrinsic='*' --steps=50 --repeat=20 --heap-pages=4096 --template=./.maintain/frame-weight-template.hbs --output=./pallets/asset-registry/src/weights.rs .PHONY: lint @@ -253,9 +235,9 @@ dev-launch: shutdown dev-launch-vanilla: make PARA_ID=2085 CHAIN=vanilla-dev RELAY_CHAIN=kusama-local dev-launch -.PHONY: launch-evm -launch-evm: - cargo run --locked --bin parallel --features with-evm-runtime --features runtime-benchmarks --features try-runtime -- --tmp --alice --dev --rpc-cors all --unsafe-ws-external --rpc-methods unsafe 
--unsafe-rpc-external --ws-port 19944 --rpc-port 29933 +.PHONY: run-dev-node +run-dev-node: + cargo run --locked --bin parallel --features runtime-benchmarks --features try-runtime -- --tmp --alice --dev --rpc-cors all --unsafe-ws-external --rpc-methods unsafe --unsafe-rpc-external --ws-port 19944 --rpc-port 29933 .PHONY: provisioning-evm provisioning-evm: @@ -284,13 +266,6 @@ production-image: -t parallelfinance/parallel:latest \ -f Dockerfile.release . -.PHONY: integration-image-with-evm -integration-image-with-evm: - DOCKER_BUILDKIT=1 docker build --build-arg BIN=parallel \ - -c 512 \ - -t parallelfinance/parallel:latest_evm \ - -f Dockerfile.evm . - .PHONY: key key: docker run --rm parallelfinance/parallel:$(DOCKER_TAG) key generate-node-key diff --git a/docs/EVM.md b/docs/EVM.md index 6e6c6fd11..1c089563b 100644 --- a/docs/EVM.md +++ b/docs/EVM.md @@ -9,7 +9,7 @@ - launch chain locally ``` -make init && make launch-evm +make init && make run-dev-node ``` - provisioning diff --git a/node/parallel/Cargo.toml b/node/parallel/Cargo.toml index aba15323c..c5613dbf6 100644 --- a/node/parallel/Cargo.toml +++ b/node/parallel/Cargo.toml @@ -33,8 +33,8 @@ pallet-transaction-payment-rpc-runtime-api = { git = 'https://github.com/parityt # runtimes heiko-runtime = { path = '../../runtime/heiko' } parallel-runtime = { path = '../../runtime/parallel' } -kerria-runtime = { path = '../../runtime/kerria', optional = true} -vanilla-runtime = { path = '../../runtime/vanilla', optional = true } +kerria-runtime = { path = '../../runtime/kerria' } +vanilla-runtime = { path = '../../runtime/vanilla' } sc-basic-authorship = { git = 'https://github.com/paritytech/substrate.git', branch = 'polkadot-v0.9.32' } sc-chain-spec = { git = 'https://github.com/paritytech/substrate.git', branch = 'polkadot-v0.9.32' } @@ -131,6 +131,8 @@ default = ['std'] runtime-benchmarks = [ 'parallel-runtime/runtime-benchmarks', 'heiko-runtime/runtime-benchmarks', + 'vanilla-runtime/runtime-benchmarks', + 'kerria-runtime/runtime-benchmarks', 'polkadot-service/runtime-benchmarks', "try-runtime-cli", ] @@ -138,13 +140,7 @@ std = [] try-runtime = [ 'heiko-runtime/try-runtime', 'parallel-runtime/try-runtime', - "polkadot-service/try-runtime", -] -with-evm-runtime = [ - 'vanilla-runtime', - 'kerria-runtime', - 'vanilla-runtime/runtime-benchmarks', 'vanilla-runtime/try-runtime', - 'kerria-runtime/runtime-benchmarks', 'kerria-runtime/try-runtime', + "polkadot-service/try-runtime", ] diff --git a/node/parallel/src/chain_spec/heiko.rs b/node/parallel/src/chain_spec/heiko.rs index 847452c64..5f7a140af 100644 --- a/node/parallel/src/chain_spec/heiko.rs +++ b/node/parallel/src/chain_spec/heiko.rs @@ -13,11 +13,12 @@ // limitations under the License. 
use heiko_runtime::{ - opaque::SessionKeys, BalancesConfig, BridgeMembershipConfig, CollatorSelectionConfig, - CrowdloansAutomatorsMembershipConfig, DemocracyConfig, GeneralCouncilConfig, - GeneralCouncilMembershipConfig, GenesisConfig, LiquidStakingAgentsMembershipConfig, - LiquidStakingConfig, OracleMembershipConfig, ParachainInfoConfig, PolkadotXcmConfig, - SessionConfig, SystemConfig, TechnicalCommitteeMembershipConfig, VestingConfig, WASM_BINARY, + opaque::SessionKeys, BalancesConfig, BaseFeeConfig, BridgeMembershipConfig, + CollatorSelectionConfig, CrowdloansAutomatorsMembershipConfig, DemocracyConfig, EVMConfig, + GeneralCouncilConfig, GeneralCouncilMembershipConfig, GenesisConfig, + LiquidStakingAgentsMembershipConfig, LiquidStakingConfig, OracleMembershipConfig, + ParachainInfoConfig, ParallelPrecompilesType, PolkadotXcmConfig, SessionConfig, SystemConfig, + TechnicalCommitteeMembershipConfig, VestingConfig, WASM_BINARY, }; // use heiko_runtime::SudoConfig; use primitives::*; @@ -139,6 +140,11 @@ fn heiko_genesis( technical_committee: Vec, id: ParaId, ) -> GenesisConfig { + // This is supposed the be the simplest bytecode to revert without returning any data. + // We will pre-deploy it under all of our precompiles to ensure they can be called from + // within contracts. + // (PUSH1 0x00 PUSH1 0x00 REVERT) + let revert_bytecode = vec![0x60, 0x00, 0x60, 0x00, 0xFD]; GenesisConfig { system: SystemConfig { code: WASM_BINARY @@ -211,5 +217,24 @@ fn heiko_genesis( polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(2), }, + evm: EVMConfig { + // We need _some_ code inserted at the precompile address so that + // the evm will actually call the address. + accounts: ParallelPrecompilesType::used_addresses() + .map(|addr| { + ( + addr, + fp_evm::GenesisAccount { + nonce: Default::default(), + balance: Default::default(), + storage: Default::default(), + code: revert_bytecode.clone(), + }, + ) + }) + .collect(), + }, + base_fee: BaseFeeConfig::new(sp_core::U256::from(10_000_000), sp_runtime::Permill::zero()), + ethereum: Default::default(), } } diff --git a/node/parallel/src/chain_spec/kerria.rs b/node/parallel/src/chain_spec/kerria.rs index 53836a58f..954045b9f 100644 --- a/node/parallel/src/chain_spec/kerria.rs +++ b/node/parallel/src/chain_spec/kerria.rs @@ -254,10 +254,7 @@ fn kerria_genesis( }) .collect(), }, - base_fee: BaseFeeConfig::new( - sp_core::U256::from(1_000_000_000), - sp_runtime::Permill::from_parts(125_000), - ), + base_fee: BaseFeeConfig::new(sp_core::U256::from(10_000_000), sp_runtime::Permill::zero()), ethereum: Default::default(), } } diff --git a/node/parallel/src/chain_spec/mod.rs b/node/parallel/src/chain_spec/mod.rs index 98903742d..d77b3734b 100644 --- a/node/parallel/src/chain_spec/mod.rs +++ b/node/parallel/src/chain_spec/mod.rs @@ -13,11 +13,8 @@ // limitations under the License. pub mod heiko; -pub mod parallel; - -#[cfg(feature = "with-evm-runtime")] pub mod kerria; -#[cfg(feature = "with-evm-runtime")] +pub mod parallel; pub mod vanilla; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; diff --git a/node/parallel/src/chain_spec/parallel.rs b/node/parallel/src/chain_spec/parallel.rs index 702eef267..f70c4032e 100644 --- a/node/parallel/src/chain_spec/parallel.rs +++ b/node/parallel/src/chain_spec/parallel.rs @@ -13,11 +13,12 @@ // limitations under the License. 
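Worth spelling out what the genesis additions above do. The five bytes pre-deployed at every precompile address decode to `PUSH1 0x00 PUSH1 0x00 REVERT`, i.e. a contract that immediately reverts with no return data; as the in-code comments say, its only purpose is to put *some* code at those addresses so the EVM treats them as deployed contracts and calls from within other contracts reach the precompiles. A minimal standalone sketch of the same mapping, with hypothetical addresses standing in for `ParallelPrecompilesType::used_addresses()`:

```rust
use std::collections::BTreeMap;

// PUSH1 0x00, PUSH1 0x00, REVERT — revert immediately with empty return data.
const REVERT_BYTECODE: [u8; 5] = [0x60, 0x00, 0x60, 0x00, 0xFD];

fn main() {
    // Hypothetical 20-byte precompile addresses, standing in for
    // ParallelPrecompilesType::used_addresses() used in the genesis configs above.
    let used_addresses: [[u8; 20]; 2] = [[1u8; 20], [2u8; 20]];

    // Same shape as the EVMConfig `accounts` field: address -> account code.
    let accounts: BTreeMap<[u8; 20], Vec<u8>> = used_addresses
        .iter()
        .map(|addr| (*addr, REVERT_BYTECODE.to_vec()))
        .collect();

    assert_eq!(accounts.len(), 2);
    assert!(accounts.values().all(|code| code == &REVERT_BYTECODE));
}
```

The related `BaseFeeConfig::new(sp_core::U256::from(10_000_000), sp_runtime::Permill::zero())` lines start every runtime at the same base fee and, assuming the second argument is pallet-base-fee's elasticity, effectively pin it: with zero elasticity the base fee no longer adjusts with block fullness, whereas the previous `Permill::from_parts(125_000)` allowed moves of up to 12.5% per block.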
use parallel_runtime::{ - opaque::SessionKeys, BalancesConfig, BridgeMembershipConfig, CollatorSelectionConfig, - CrowdloansAutomatorsMembershipConfig, DemocracyConfig, GeneralCouncilConfig, - GeneralCouncilMembershipConfig, GenesisConfig, LiquidStakingAgentsMembershipConfig, - LiquidStakingConfig, OracleMembershipConfig, ParachainInfoConfig, PolkadotXcmConfig, - SessionConfig, SystemConfig, TechnicalCommitteeMembershipConfig, VestingConfig, WASM_BINARY, + opaque::SessionKeys, BalancesConfig, BaseFeeConfig, BridgeMembershipConfig, + CollatorSelectionConfig, CrowdloansAutomatorsMembershipConfig, DemocracyConfig, EVMConfig, + GeneralCouncilConfig, GeneralCouncilMembershipConfig, GenesisConfig, + LiquidStakingAgentsMembershipConfig, LiquidStakingConfig, OracleMembershipConfig, + ParachainInfoConfig, ParallelPrecompilesType, PolkadotXcmConfig, SessionConfig, SystemConfig, + TechnicalCommitteeMembershipConfig, VestingConfig, WASM_BINARY, }; // use parallel_runtime::SudoConfig; use primitives::{network::NetworkType, *}; @@ -139,6 +140,7 @@ fn parallel_genesis( technical_committee: Vec, id: ParaId, ) -> GenesisConfig { + let revert_bytecode = vec![0x60, 0x00, 0x60, 0x00, 0xFD]; GenesisConfig { system: SystemConfig { code: WASM_BINARY @@ -211,5 +213,24 @@ fn parallel_genesis( polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(2), }, + evm: EVMConfig { + // We need _some_ code inserted at the precompile address so that + // the evm will actually call the address. + accounts: ParallelPrecompilesType::used_addresses() + .map(|addr| { + ( + addr, + fp_evm::GenesisAccount { + nonce: Default::default(), + balance: Default::default(), + storage: Default::default(), + code: revert_bytecode.clone(), + }, + ) + }) + .collect(), + }, + base_fee: BaseFeeConfig::new(sp_core::U256::from(10_000_000), sp_runtime::Permill::zero()), + ethereum: Default::default(), } } diff --git a/node/parallel/src/chain_spec/vanilla.rs b/node/parallel/src/chain_spec/vanilla.rs index 5879ca338..d98704fb0 100644 --- a/node/parallel/src/chain_spec/vanilla.rs +++ b/node/parallel/src/chain_spec/vanilla.rs @@ -328,10 +328,7 @@ fn vanilla_genesis( map }, }, - base_fee: BaseFeeConfig::new( - sp_core::U256::from(1_000_000_000), - sp_runtime::Permill::from_parts(125_000), - ), + base_fee: BaseFeeConfig::new(sp_core::U256::from(10_000_000), sp_runtime::Permill::zero()), ethereum: Default::default(), } } diff --git a/node/parallel/src/client.rs b/node/parallel/src/client.rs index c4a0a8741..e94b9fe55 100644 --- a/node/parallel/src/client.rs +++ b/node/parallel/src/client.rs @@ -39,6 +39,8 @@ pub trait RuntimeApiCollection: + cumulus_primitives_core::CollectCollationInfo + pallet_loans_rpc::LoansRuntimeApi + pallet_router_rpc::RouterRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi + + fp_rpc::ConvertTransactionRuntimeApi where >::StateBackend: sp_api::StateBackend, { @@ -58,7 +60,9 @@ where + sp_session::SessionKeys + cumulus_primitives_core::CollectCollationInfo + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, + + pallet_router_rpc::RouterRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi + + fp_rpc::ConvertTransactionRuntimeApi, >::StateBackend: sp_api::StateBackend, { } diff --git a/node/parallel/src/command.rs b/node/parallel/src/command.rs index 651898262..7bc0e21b5 100644 --- a/node/parallel/src/command.rs +++ b/node/parallel/src/command.rs @@ -40,7 +40,6 @@ const HEIKO_PARA_ID: u32 = 2085; fn load_spec(id: &str) -> std::result::Result, String> { Ok(match id { - #[cfg(feature = "with-evm-runtime")] "dev" => 
Box::new(chain_spec::vanilla::local_development_config(ParaId::from( HEIKO_PARA_ID, ))), @@ -56,11 +55,9 @@ fn load_spec(id: &str) -> std::result::Result, St "" | "parallel" => Box::new(chain_spec::parallel::parallel_config(ParaId::from( PARALLEL_PARA_ID, ))?), - #[cfg(feature = "with-evm-runtime")] "vanilla-dev" => Box::new(chain_spec::vanilla::vanilla_dev_config(ParaId::from( HEIKO_PARA_ID, ))), - #[cfg(feature = "with-evm-runtime")] "kerria-dev" => Box::new(chain_spec::kerria::kerria_dev_config(ParaId::from( PARALLEL_PARA_ID, ))), @@ -78,25 +75,9 @@ fn load_spec(id: &str) -> std::result::Result, St } else if starts_with("heiko") { Box::new(chain_spec::heiko::ChainSpec::from_json_file(path)?) } else if starts_with("vanilla") { - #[cfg(feature = "with-evm-runtime")] - { - Box::new(chain_spec::vanilla::ChainSpec::from_json_file(path)?) - } - #[cfg(not(feature = "with-evm-runtime"))] - return Err( - "chain_spec's filename start with vanilla should be built with evm runtime" - .into(), - ); + Box::new(chain_spec::vanilla::ChainSpec::from_json_file(path)?) } else if starts_with("kerria") { - #[cfg(feature = "with-evm-runtime")] - { - Box::new(chain_spec::kerria::ChainSpec::from_json_file(path)?) - } - #[cfg(not(feature = "with-evm-runtime"))] - return Err( - "chain_spec's filename start with kerria should be built with evm runtime" - .into(), - ); + Box::new(chain_spec::kerria::ChainSpec::from_json_file(path)?) } else { return Err( "chain_spec's filename must start with parallel/heiko/kerria/vanilla".into(), @@ -141,15 +122,9 @@ impl SubstrateCli for Cli { } else if chain_spec.is_heiko() { &heiko_runtime::VERSION } else if chain_spec.is_vanilla() || chain_spec.is_dev() { - #[cfg(feature = "with-evm-runtime")] - return &vanilla_runtime::VERSION; - #[cfg(not(feature = "with-evm-runtime"))] - return &heiko_runtime::VERSION; + &vanilla_runtime::VERSION } else if chain_spec.is_kerria() { - #[cfg(feature = "with-evm-runtime")] - return &kerria_runtime::VERSION; - #[cfg(not(feature = "with-evm-runtime"))] - return ¶llel_runtime::VERSION; + &kerria_runtime::VERSION } else { unreachable!() } @@ -199,52 +174,33 @@ impl SubstrateCli for RelayChainCli { macro_rules! 
switch_runtime { ($chain_spec:expr, { $( $code:tt )* }) => { if $chain_spec.is_parallel() { - #[cfg(not(feature = "with-evm-runtime"))] { - #[allow(unused_imports)] - use crate::service::ParallelExecutor as Executor; - #[allow(unused_imports)] - use parallel_runtime::{RuntimeApi, Block}; + #[allow(unused_imports)] + use crate::service::ParallelExecutor as Executor; + #[allow(unused_imports)] + use parallel_runtime::{RuntimeApi, Block}; - $( $code )* - } - #[cfg(feature = "with-evm-runtime")] { - unreachable!(); - } + $( $code )* } else if $chain_spec.is_heiko() { - #[cfg(not(feature = "with-evm-runtime"))] { - #[allow(unused_imports)] - use crate::service::HeikoExecutor as Executor; - #[allow(unused_imports)] - use heiko_runtime::{RuntimeApi, Block}; + #[allow(unused_imports)] + use crate::service::HeikoExecutor as Executor; + #[allow(unused_imports)] + use heiko_runtime::{RuntimeApi, Block}; - $( $code )* - } - #[cfg(feature = "with-evm-runtime")] { - unreachable!(); - } + $( $code )* } else if ($chain_spec.is_vanilla() || $chain_spec.is_dev()) { - #[cfg(feature = "with-evm-runtime")] { - #[allow(unused_imports)] - use crate::evm_service::VanillaExecutor as Executor; - #[allow(unused_imports)] - use vanilla_runtime::{RuntimeApi, Block}; - - $( $code )* - }#[cfg(not(feature = "with-evm-runtime"))] { - unreachable!(); - } + #[allow(unused_imports)] + use crate::service::VanillaExecutor as Executor; + #[allow(unused_imports)] + use vanilla_runtime::{RuntimeApi, Block}; + + $( $code )* } else if $chain_spec.is_kerria() { - #[cfg(feature = "with-evm-runtime")] { - #[allow(unused_imports)] - use crate::evm_service::KerriaExecutor as Executor; - #[allow(unused_imports)] - use kerria_runtime::{RuntimeApi, Block}; + #[allow(unused_imports)] + use crate::service::KerriaExecutor as Executor; + #[allow(unused_imports)] + use kerria_runtime::{RuntimeApi, Block}; - $( $code )* - } - #[cfg(not(feature = "with-evm-runtime"))] { - unreachable!(); - } + $( $code )* } else { unreachable!(); } @@ -467,13 +423,9 @@ pub fn run() -> Result<()> { switch_runtime!(chain_spec, { runner.run_node_until_exit(|config| async move { - #[cfg(feature = "with-evm-runtime")] - { - if config.chain_spec.is_dev() { - return crate::evm_service::start_dev_node::(config).map_err(Into::into); - } + if config.chain_spec.is_dev() { + return crate::service::start_dev_node::(config).map_err(Into::into); } - let extension = chain_spec::Extensions::try_get(&*config.chain_spec); let relay_chain_id = extension.map(|e| e.relay_chain.clone()); let para_chain_id = extension @@ -524,25 +476,15 @@ pub fn run() -> Result<()> { } ); - #[cfg(feature = "with-evm-runtime")] - { - crate::evm_service::start_node::(config, polkadot_config, collator_options, id) - .await - .map(|r| r.0) - .map_err(Into::into) - } - #[cfg(not(feature = "with-evm-runtime"))] - { - crate::service::start_node::( - config, - polkadot_config, - collator_options, - id, - ) - .await - .map(|r| r.0) - .map_err(Into::into) - } + crate::service::start_node::( + config, + polkadot_config, + collator_options, + id, + ) + .await + .map(|r| r.0) + .map_err(Into::into) }) }) } diff --git a/node/parallel/src/evm_rpc.rs b/node/parallel/src/evm_rpc.rs deleted file mode 100644 index 734ad495a..000000000 --- a/node/parallel/src/evm_rpc.rs +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2021 Parallel Finance Developer. -// This file is part of Parallel Finance. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![allow(dead_code, unused)] - -use fc_db::DatabaseSource; -use fc_rpc::{ - Eth, EthApiServer, EthBlockDataCacheTask, EthFilter, EthFilterApiServer, EthPubSub, - EthPubSubApiServer, Net, NetApiServer, OverrideHandle, RuntimeApiStorageOverride, - SchemaV1Override, SchemaV2Override, SchemaV3Override, StorageOverride, Web3, Web3ApiServer, -}; -use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; -use fp_storage::EthereumStorageSchema; -use jsonrpsee::RpcModule; -use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; -use primitives::*; -use sc_client_api::{AuxStore, Backend, BlockchainEvents, StateBackend, StorageProvider}; -use sc_network::NetworkService; -pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; -use sc_service::{ - error::Error as ServiceError, BasePath, ChainSpec, Configuration, PartialComponents, - TFullBackend, TFullClient, TaskManager, -}; -use sc_transaction_pool::{ChainApi, Pool}; -use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_blockchain::{ - Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, -}; -use sp_runtime::traits::BlakeTwo256; -use std::collections::BTreeMap; -use std::sync::Arc; -use substrate_frame_rpc_system::{System, SystemApiServer}; - -use orml_oracle_rpc::{Oracle, OracleApiServer}; -use pallet_loans_rpc::{Loans, LoansApiServer}; -use pallet_router_rpc::{Router, RouterApiServer}; - -pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf { - let config_dir = config - .base_path - .as_ref() - .map(|base_path| base_path.config_dir(config.chain_spec.id())) - .unwrap_or_else(|| { - BasePath::from_project("", "", "parallel").config_dir(config.chain_spec.id()) - }); - config_dir.join("frontier").join(path) -} - -pub fn open_frontier_backend( - client: Arc, - config: &Configuration, -) -> Result>, String> -where - C: sp_blockchain::HeaderBackend, -{ - Ok(Arc::new(fc_db::Backend::::new( - client, - &fc_db::DatabaseSettings { - source: match config.database { - DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { - path: frontier_database_dir(config, "db"), - cache_size: 0, - }, - DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { - path: frontier_database_dir(config, "paritydb"), - }, - DatabaseSource::Auto { .. 
} => DatabaseSource::Auto { - rocksdb_path: frontier_database_dir(config, "db"), - paritydb_path: frontier_database_dir(config, "paritydb"), - cache_size: 0, - }, - _ => { - return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()) - } - }, - }, - )?)) -} - -pub fn overrides_handle(client: Arc) -> Arc> -where - C: ProvideRuntimeApi + StorageProvider + AuxStore, - C: HeaderBackend + HeaderMetadata, - C: Send + Sync + 'static, - C::Api: fp_rpc::EthereumRuntimeRPCApi, - BE: Backend + 'static, - BE::State: StateBackend, -{ - let mut overrides_map = BTreeMap::new(); - overrides_map.insert( - EthereumStorageSchema::V1, - Box::new(SchemaV1Override::new(client.clone())) - as Box + Send + Sync>, - ); - overrides_map.insert( - EthereumStorageSchema::V2, - Box::new(SchemaV2Override::new(client.clone())) - as Box + Send + Sync>, - ); - overrides_map.insert( - EthereumStorageSchema::V3, - Box::new(SchemaV3Override::new(client.clone())) - as Box + Send + Sync>, - ); - - Arc::new(OverrideHandle { - schemas: overrides_map, - fallback: Box::new(RuntimeApiStorageOverride::new(client)), - }) -} - -/// Full client dependencies -pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc

, - /// Graph pool instance. - pub graph: Arc>, - /// Network service - pub network: Arc>, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, - /// The Node authority flag - pub is_authority: bool, - /// Frontier Backend. - pub frontier_backend: Arc>, - /// EthFilterApi pool. - pub filter_pool: FilterPool, - /// Maximum fee history cache size. - pub fee_history_limit: u64, - /// Fee history cache. - pub fee_history_cache: FeeHistoryCache, - /// Ethereum data access overrides. - pub overrides: Arc>, - /// Cache for Ethereum block data. - pub block_data_cache: Arc>, -} - -/// Instantiate all RPC extensions. -pub fn create_full( - deps: FullDeps, - subscription_task_executor: SubscriptionTaskExecutor, -) -> Result, Box> -where - C: ProvideRuntimeApi - + HeaderBackend - + AuxStore - + StorageProvider - + HeaderMetadata - + BlockchainEvents - + Send - + Sync - + 'static, - C: sc_client_api::BlockBackend, - C::Api: substrate_frame_rpc_system::AccountNonceApi - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + BlockBuilder - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi - + fp_rpc::ConvertTransactionRuntimeApi - + fp_rpc::EthereumRuntimeRPCApi, - P: TransactionPool + Sync + Send + 'static, - BE: Backend + 'static, - BE::State: StateBackend, - BE::Blockchain: BlockchainBackend, - A: ChainApi + 'static, -{ - let mut io = RpcModule::new(()); - let FullDeps { - client, - pool, - graph, - network, - deny_unsafe, - is_authority, - frontier_backend, - filter_pool, - fee_history_limit, - fee_history_cache, - overrides, - block_data_cache, - } = deps; - - io.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; - io.merge(TransactionPayment::new(client.clone()).into_rpc())?; - - // let no_tx_converter: Option = None; - enum Never {} - impl fp_rpc::ConvertTransaction for Never { - fn convert_transaction(&self, _transaction: pallet_ethereum::Transaction) -> T { - // The Never type is not instantiable, but this method requires the type to be - // instantiated to be called (`&self` parameter), so if the code compiles we have the - // guarantee that this function will never be called. 
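The comment above (and its twin in the new `rpc.rs` at the end of this diff) describes a small type-level trick: an uninhabited enum satisfies the transaction-converter bound even though no conversion can ever happen, and the RPC layer is then handed `None` of that type. A self-contained sketch of the pattern, using a stand-in `ConvertTx` trait rather than the real `fp_rpc::ConvertTransaction`:

```rust
// Stand-in for fp_rpc::ConvertTransaction<T>; assumption for illustration only.
trait ConvertTx<T> {
    fn convert_transaction(&self, tx: Vec<u8>) -> T;
}

// Uninhabited type: no value of `Never` can ever be constructed, so the
// method below exists only to satisfy the trait bound.
enum Never {}

impl<T> ConvertTx<T> for Never {
    fn convert_transaction(&self, _tx: Vec<u8>) -> T {
        // Calling this requires a `&Never`, which cannot exist, so this is
        // statically unreachable — the same guarantee the deleted code relies on.
        unreachable!()
    }
}

fn main() {
    // What actually gets passed along: a `None` whose type still names a
    // concrete converter, mirroring `let convert_transaction: Option<Never> = None;`.
    let converter: Option<Never> = None;
    assert!(converter.is_none());
}
```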
- unreachable!() - } - } - let convert_transaction: Option = None; - - io.merge( - Eth::new( - Arc::clone(&client), - Arc::clone(&pool), - graph.clone(), - convert_transaction, - Arc::clone(&network), - Default::default(), - Arc::clone(&overrides), - Arc::clone(&frontier_backend), - is_authority, - Arc::clone(&block_data_cache), - fee_history_cache, - fee_history_limit, - 1, - ) - .into_rpc(), - )?; - - let max_past_logs: u32 = 10_000; - let max_stored_filters: usize = 500; - io.merge( - EthFilter::new( - client.clone(), - frontier_backend, - filter_pool, - max_stored_filters, - max_past_logs, - block_data_cache, - ) - .into_rpc(), - )?; - - io.merge(Net::new(Arc::clone(&client), network.clone(), true).into_rpc())?; - - io.merge(Web3::new(Arc::clone(&client)).into_rpc())?; - - io.merge( - EthPubSub::new( - pool, - Arc::clone(&client), - network, - subscription_task_executor, - overrides, - ) - .into_rpc(), - )?; - - io.merge(Oracle::new(client.clone()).into_rpc())?; - io.merge(Loans::new(client.clone()).into_rpc())?; - io.merge(Router::new(client.clone()).into_rpc())?; - - Ok(io) -} diff --git a/node/parallel/src/evm_service.rs b/node/parallel/src/evm_service.rs deleted file mode 100644 index 19e36ce89..000000000 --- a/node/parallel/src/evm_service.rs +++ /dev/null @@ -1,1238 +0,0 @@ -// Copyright 2021 Parallel Finance Developer. -// This file is part of Parallel Finance. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#![allow(dead_code, unused)] - -use cumulus_client_consensus_common::{ParachainCandidate, ParachainConsensus}; -use cumulus_primitives_core::relay_chain::v2::{Hash as PHash, PersistedValidationData}; -use cumulus_primitives_parachain_inherent::MockValidationDataInherentDataProvider; -use futures::lock::Mutex; -use sc_consensus::{import_queue::Verifier as VerifierT, BlockImportParams}; -use sp_api::ApiExt; -use sp_consensus::CacheKeyId; -use sp_consensus_aura::{sr25519::AuthorityId as AuraId, AuraApi}; -use sp_runtime::{generic::BlockId, traits::Header as HeaderT}; -use std::sync::Arc; - -use cumulus_client_cli::CollatorOptions; -use cumulus_client_consensus_aura::{AuraConsensus, BuildAuraConsensusParams, SlotProportion}; -use cumulus_client_consensus_common::ParachainBlockImport; -use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; -use cumulus_client_network::BlockAnnounceValidator; -use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, -}; -use cumulus_primitives_core::ParaId; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; -use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; -use fc_consensus::FrontierBlockImport; -use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; -use futures::StreamExt; -use polkadot_service::CollatorPair; -use sc_client_api::{BlockchainEvents, ExecutorProvider}; -use sc_consensus::import_queue::BasicQueue; -use sc_consensus_manual_seal::{self as manual_seal}; -use sc_executor::NativeElseWasmExecutor; -use sc_network::NetworkService; -use sc_network_common::service::NetworkBlock; -use sc_service::error::Error as ServiceError; -use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; -use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::ConstructRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_keystore::SyncCryptoStorePtr; -use sp_runtime::traits::BlakeTwo256; -use std::{collections::BTreeMap, time::Duration}; -use substrate_prometheus_endpoint::Registry; - -use primitives::*; - -pub struct VanillaExecutor; -impl sc_executor::NativeExecutionDispatch for VanillaExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - vanilla_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - vanilla_runtime::native_version() - } -} - -pub type FullBackend = sc_service::TFullBackend; -pub type FullClient = - sc_service::TFullClient>; -pub type FullSelectChain = sc_consensus::LongestChain; - -pub enum BuildOnAccess { - Uninitialized(Option R + Send + Sync>>), - Initialized(R), -} - -impl BuildOnAccess { - fn get_mut(&mut self) -> &mut R { - loop { - match self { - Self::Uninitialized(f) => { - *self = Self::Initialized((f.take().unwrap())()); - } - Self::Initialized(ref mut r) => return r, - } - } - } -} - -pub struct WaitForAuraConsensus { - pub client: Arc, - pub aura_consensus: Arc>>>>, - pub relay_chain_consensus: Arc>>>, -} - -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - } - } -} - -#[async_trait::async_trait] -impl ParachainConsensus for 
WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, -{ - async fn produce_candidate( - &mut self, - parent: &Header, - relay_parent: PHash, - validation_data: &PersistedValidationData, - ) -> Option> { - let block_id = BlockId::hash(parent.hash()); - if self - .client - .runtime_api() - .has_api::>(&block_id) - .unwrap_or(false) - { - self.aura_consensus - .lock() - .await - .get_mut() - .produce_candidate(parent, relay_parent, validation_data) - .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } -} - -pub struct Verifier { - pub client: Arc, - pub aura_verifier: BuildOnAccess>>, - pub relay_chain_verifier: Box>, -} - -#[async_trait::async_trait] -impl VerifierT for Verifier -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, -{ - async fn verify( - &mut self, - block_import: BlockImportParams, - ) -> Result< - ( - BlockImportParams, - Option)>>, - ), - String, - > { - let block_id = BlockId::hash(*block_import.header.parent_hash()); - - if self - .client - .runtime_api() - .has_api::>(&block_id) - .unwrap_or(false) - { - self.aura_verifier.get_mut().verify(block_import).await - } else { - self.relay_chain_verifier.verify(block_import).await - } - } -} - -/// Vanilla network runtime executor. -pub mod vanilla { - pub use vanilla_runtime::RuntimeApi; - - /// vanilla runtime executor. - pub struct Executor; - impl sc_executor::NativeExecutionDispatch for Executor { - #[cfg(not(feature = "runtime-benchmarks"))] - type ExtendHostFunctions = (); - - #[cfg(feature = "runtime-benchmarks")] - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - vanilla_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - vanilla_runtime::native_version() - } - } -} - -pub struct KerriaExecutor; -impl sc_executor::NativeExecutionDispatch for KerriaExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - kerria_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - kerria_runtime::native_version() - } -} - -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. 
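The deleted service code above leans on a small lazy-initialization helper, `BuildOnAccess`, whose generic parameters were lost in this listing. A self-contained sketch of the same pattern, with the stripped generics reconstructed from context (an assumption, not a verbatim copy of the deleted file):

```rust
// Builds a value the first time it is accessed; later accesses reuse it.
pub enum BuildOnAccess<R> {
    Uninitialized(Option<Box<dyn FnOnce() -> R + Send + Sync>>),
    Initialized(R),
}

impl<R> BuildOnAccess<R> {
    fn get_mut(&mut self) -> &mut R {
        loop {
            match self {
                Self::Uninitialized(f) => {
                    // Run the stored closure exactly once, then replace self.
                    *self = Self::Initialized((f.take().unwrap())());
                }
                Self::Initialized(ref mut r) => return r,
            }
        }
    }
}

fn main() {
    let mut expensive: BuildOnAccess<Vec<u32>> =
        BuildOnAccess::Uninitialized(Some(Box::new(|| (0..4).collect())));
    // The closure only runs on this first access.
    assert_eq!(expensive.get_mut().len(), 4);
}
```

In the deleted `evm_service.rs` this wraps the Aura verifier and consensus so they are only constructed once the runtime is known to expose the Aura API.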
-pub fn new_partial( - config: &Configuration, - build_import_queue: BIQ, -) -> Result< - PartialComponents< - TFullClient>, - TFullBackend, - (), - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_transaction_pool::FullPool< - Block, - TFullClient>, - >, - ( - Option, - Option, - Arc>, - ), - >, - sc_service::Error, -> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi - + frame_system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + fp_rpc::EthereumRuntimeRPCApi - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, - BIQ: FnOnce( - Arc>>, - FrontierBlockImport< - Block, - Arc>>, - TFullClient>, - >, - &Configuration, - Option, - &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, - >, -{ - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = sc_executor::NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager - .spawn_handle() - .spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let frontier_backend = crate::evm_rpc::open_frontier_backend(client.clone(), config)?; - let frontier_block_import = - FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone()); - - let import_queue = build_import_queue( - client.clone(), - frontier_block_import, - config, - telemetry.as_ref().map(|telemetry| telemetry.handle()), - &task_manager, - )?; - - let params = PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (telemetry, telemetry_worker_handle, frontier_backend), - }; - - Ok(params) -} - -async fn build_relay_chain_interface( - polkadot_config: Configuration, - parachain_config: &Configuration, - telemetry_worker_handle: Option, - task_manager: &mut TaskManager, - collator_options: CollatorOptions, -) -> RelayChainResult<( - Arc<(dyn RelayChainInterface + 'static)>, - Option, -)> { - match collator_options.relay_chain_rpc_url { - Some(relay_chain_url) => { - 
build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await - } - None => build_inprocess_relay_chain( - polkadot_config, - parachain_config, - telemetry_worker_handle, - task_manager, - None, - ), - } -} - -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - id: ParaId, - build_import_queue: BIQ, - build_consensus: BIC, -) -> sc_service::error::Result<( - TaskManager, - Arc>>, -)> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + substrate_frame_rpc_system::AccountNonceApi - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + sp_consensus_aura::AuraApi - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + cumulus_primitives_core::CollectCollationInfo - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, - BIQ: FnOnce( - Arc>>, - FrontierBlockImport< - Block, - Arc>>, - TFullClient>, - >, - &Configuration, - Option, - &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, - >, - BIC: FnOnce( - Arc>>, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc< - sc_transaction_pool::FullPool< - Block, - TFullClient>, - >, - >, - Arc>, - SyncCryptoStorePtr, - bool, - ) -> Result>, sc_service::Error>, -{ - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (mut telemetry, telemetry_worker_handle, frontier_backend) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - ) - .await - .map_err(|e| match e { - RelayChainError::ServiceError(polkadot_service::Error::Sub(x)) => x, - s => format!("{}", s).into(), - })?; - let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id); - - let force_authoring = parachain_config.force_authoring; - let is_authority = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); - let (network, system_rpc_tx, tx_handler_controller, start_network) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: ¶chain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue.clone(), - block_announce_validator_builder: 
Some(Box::new(|_| { - Box::new(block_announce_validator) - })), - warp_sync: None, - })?; - - let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new())); - let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new())); - let overrides = crate::evm_rpc::overrides_handle(client.clone()); - - // Frontier offchain DB task. Essential. - // Maps emulated ethereum data to substrate native data. - task_manager.spawn_essential_handle().spawn( - "frontier-mapping-sync-worker", - Some("frontier"), - fc_mapping_sync::MappingSyncWorker::new( - client.import_notification_stream(), - Duration::new(6, 0), - client.clone(), - backend.clone(), - frontier_backend.clone(), - 3, - 0, - fc_mapping_sync::SyncStrategy::Parachain, - ) - .for_each(|()| futures::future::ready(())), - ); - - // Frontier `EthFilterApi` maintenance. Manages the pool of user-created Filters. - // Each filter is allowed to stay in the pool for 100 blocks. - const FILTER_RETAIN_THRESHOLD: u64 = 100; - task_manager.spawn_essential_handle().spawn( - "frontier-filter-pool", - Some("frontier"), - fc_rpc::EthTask::filter_pool_task( - client.clone(), - filter_pool.clone(), - FILTER_RETAIN_THRESHOLD, - ), - ); - - const FEE_HISTORY_LIMIT: u64 = 2048; - task_manager.spawn_essential_handle().spawn( - "frontier-fee-history", - Some("frontier"), - fc_rpc::EthTask::fee_history_task( - client.clone(), - overrides.clone(), - fee_history_cache.clone(), - FEE_HISTORY_LIMIT, - ), - ); - - let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( - task_manager.spawn_handle(), - overrides.clone(), - 50, - 50, - prometheus_registry.clone(), - )); - - let rpc_extensions_builder = { - let client = client.clone(); - let network = network.clone(); - let transaction_pool = transaction_pool.clone(); - let frontier_backend = frontier_backend.clone(); - let overrides = overrides.clone(); - let fee_history_cache = fee_history_cache.clone(); - let block_data_cache = block_data_cache.clone(); - - Box::new(move |deny_unsafe, subscription| { - let deps = crate::evm_rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - graph: transaction_pool.pool().clone(), - network: network.clone(), - is_authority, - deny_unsafe, - frontier_backend: frontier_backend.clone(), - filter_pool: filter_pool.clone(), - fee_history_limit: FEE_HISTORY_LIMIT, - fee_history_cache: fee_history_cache.clone(), - block_data_cache: block_data_cache.clone(), - overrides: overrides.clone(), - }; - - crate::evm_rpc::create_full(deps, subscription).map_err(Into::into) - }) - }; - - // Spawn basic services. 
- sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder: Box::new(rpc_extensions_builder), - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.sync_keystore(), - backend: backend.clone(), - network: network.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - let announce_block = { - let network = network.clone(); - Arc::new(move |hash, data| network.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - if is_authority { - let parachain_consensus = build_consensus( - client.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - network, - params.keystore_container.sync_keystore(), - force_authoring, - )?; - - let spawner = task_manager.spawn_handle(); - - let params = StartCollatorParams { - para_id: id, - block_status: client.clone(), - announce_block, - client: client.clone(), - task_manager: &mut task_manager, - relay_chain_interface: relay_chain_interface.clone(), - spawner, - parachain_consensus, - import_queue, - collator_key: collator_key.expect("Command line arguments do not allow this. qed"), - relay_chain_slot_duration, - }; - - start_collator(params).await?; - } else { - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, - para_id: id, - relay_chain_interface, - relay_chain_slot_duration, - import_queue, - }; - - start_full_node(params)?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Build the import queue. -#[allow(dead_code, unused)] -pub fn build_import_queue( - client: Arc>>, - block_import: FrontierBlockImport< - Block, - Arc>>, - TFullClient>, - >, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, -> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + frame_system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + fp_rpc::EthereumRuntimeRPCApi - + sp_consensus_aura::AuraApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, -{ - let client2 = client.clone(); - - let aura_verifier = move || { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client2).unwrap(); - - Box::new(cumulus_client_consensus_aura::build_verifier::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - >( - cumulus_client_consensus_aura::BuildVerifierParams { - client: client2.clone(), - create_inherent_data_providers: move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); - - Ok((slot, time)) - }, - telemetry: telemetry_handle, - }, - )) as Box<_> - }; - - let relay_chain_verifier = Box::new(RelayChainVerifier::new(client.clone(), 
|_, _| async { - Ok(()) - })) as Box<_>; - - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), - }; - - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); - - Ok(BasicQueue::new( - verifier, - Box::new(ParachainBlockImport::new(block_import)), - None, - &spawner, - registry, - )) -} - -/// Start a parachain node for evm node(only for vanilla now) -pub async fn start_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - id: ParaId, -) -> sc_service::error::Result<( - TaskManager, - Arc>>, -)> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + substrate_frame_rpc_system::AccountNonceApi - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + sp_consensus_aura::AuraApi - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + cumulus_primitives_core::CollectCollationInfo - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - id, - |client, - block_import, - config, - telemetry, - task_manager| { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); - - Ok((slot, time)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) - }, - |client, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - force_authoring| { - let spawn_handle = task_manager.spawn_handle(); - - let slot_duration = - cumulus_client_consensus_aura::slot_duration(&*client).unwrap(); - - let proposer_factory = - sc_basic_authorship::ProposerFactory::with_proof_recording( - spawn_handle, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let relay_chain_for_aura = relay_chain_interface.clone(); - - Ok(AuraConsensus::build::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - >(BuildAuraConsensusParams { - proposer_factory, - create_inherent_data_providers: - move |_, (relay_parent, validation_data)| { - let relay_chain_for_aura = relay_chain_for_aura.clone(); - async move { - let parachain_inherent = - cumulus_primitives_parachain_inherent::ParachainInherentData::create_at( - relay_parent, - &relay_chain_for_aura, - &validation_data, - id, - ).await; - let time = sp_timestamp::InherentDataProvider::from_system_time(); - let slot 
= - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); - - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok((slot, time, parachain_inherent)) - } - }, - block_import: client.clone(), - para_client: client, - backoff_authoring_blocks: Option::<()>::None, - sync_oracle, - keystore, - force_authoring, - slot_duration, - // We got around 500ms for proposing - block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32), - // And a maximum of 750ms if slots are skipped - max_block_proposal_slot_portion: Some(SlotProportion::new(1f32 / 16f32)), - telemetry, - }) - ) - }).await -} - -/// Build a partial chain component config -pub fn new_dev_partial( - config: &Configuration, -) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - FullSelectChain, - sc_consensus::DefaultImportQueue>, - sc_transaction_pool::FullPool>, - ( - FrontierBlockImport< - Block, - Arc>, - FullClient, - >, - Option, - Arc>, - ), - >, - ServiceError, -> -where - RuntimeApi: - ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi - + frame_system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, -{ - if config.keystore_remote.is_some() { - return Err(ServiceError::Other( - "Remote Keystores are not supported.".to_string(), - )); - } - - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - let executor = sc_executor::NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager - .spawn_handle() - .spawn("telemetry", None, worker.run()); - telemetry - }); - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - let frontier_backend = crate::evm_rpc::open_frontier_backend(client.clone(), config)?; - - let frontier_block_import = - FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone()); - - let import_queue = sc_consensus_manual_seal::import_queue( - Box::new(frontier_block_import.clone()), - 
&task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ); - - Ok(sc_service::PartialComponents { - client, - backend, - task_manager, - import_queue, - keystore_container, - select_chain, - transaction_pool, - other: (frontier_block_import, telemetry, frontier_backend), - }) -} - -/// Builds a new service. -pub fn start_dev_node( - config: Configuration, -) -> Result -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi - + frame_system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + orml_oracle_rpc::OracleRuntimeApi - + pallet_loans_rpc::LoansRuntimeApi - + pallet_router_rpc::RouterRuntimeApi, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: sc_executor::NativeExecutionDispatch + 'static, -{ - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - keystore_container, - select_chain, - transaction_pool, - other: (block_import, mut telemetry, frontier_backend), - } = new_dev_partial::(&config)?; - - let (network, system_rpc_tx, tx_handler_controller, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new())); - let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new())); - let overrides = crate::evm_rpc::overrides_handle(client.clone()); - - // Frontier offchain DB task. Essential. - // Maps emulated ethereum data to substrate native data. - task_manager.spawn_essential_handle().spawn( - "frontier-mapping-sync-worker", - Some("frontier"), - fc_mapping_sync::MappingSyncWorker::new( - client.import_notification_stream(), - Duration::new(6, 0), - client.clone(), - backend.clone(), - frontier_backend.clone(), - 3, - 0, - fc_mapping_sync::SyncStrategy::Parachain, - ) - .for_each(|()| futures::future::ready(())), - ); - - // Frontier `EthFilterApi` maintenance. Manages the pool of user-created Filters. - // Each filter is allowed to stay in the pool for 100 blocks. 
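The two comment lines above describe the filter-pool maintenance task. The real task lives in `fc_rpc::EthTask::filter_pool_task`; the sketch below only illustrates the general retention idea (drop user-created filters older than a threshold number of blocks) with simplified stand-in types, not Frontier's actual implementation:

```rust
use std::collections::BTreeMap;

// Hypothetical stand-in for a Frontier FilterPool entry: the block at which
// the user-created filter was installed.
struct FilterEntry {
    created_at_block: u64,
}

// Prune filters older than `retain_threshold` blocks, the same policy the
// deleted code configures with FILTER_RETAIN_THRESHOLD = 100.
fn prune_filters(pool: &mut BTreeMap<u64, FilterEntry>, best_block: u64, retain_threshold: u64) {
    pool.retain(|_, entry| best_block.saturating_sub(entry.created_at_block) <= retain_threshold);
}

fn main() {
    let mut pool = BTreeMap::new();
    pool.insert(1, FilterEntry { created_at_block: 0 });
    pool.insert(2, FilterEntry { created_at_block: 95 });
    prune_filters(&mut pool, 150, 100);
    assert_eq!(pool.len(), 1); // only the filter installed at block 95 survives
}
```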
- const FILTER_RETAIN_THRESHOLD: u64 = 100; - task_manager.spawn_essential_handle().spawn( - "frontier-filter-pool", - Some("frontier"), - fc_rpc::EthTask::filter_pool_task( - client.clone(), - filter_pool.clone(), - FILTER_RETAIN_THRESHOLD, - ), - ); - - const FEE_HISTORY_LIMIT: u64 = 2048; - task_manager.spawn_essential_handle().spawn( - "frontier-fee-history", - Some("frontier"), - fc_rpc::EthTask::fee_history_task( - client.clone(), - overrides.clone(), - fee_history_cache.clone(), - FEE_HISTORY_LIMIT, - ), - ); - - let role = config.role.clone(); - let prometheus_registry = config.prometheus_registry().cloned(); - let is_authority = config.role.is_authority(); - - let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( - task_manager.spawn_handle(), - overrides.clone(), - 50, - 50, - prometheus_registry.clone(), - )); - - let rpc_extensions_builder = { - let client = client.clone(); - let network = network.clone(); - let transaction_pool = transaction_pool.clone(); - - Box::new(move |deny_unsafe, subscription| { - let deps = crate::evm_rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - graph: transaction_pool.pool().clone(), - network: network.clone(), - is_authority, - deny_unsafe, - frontier_backend: frontier_backend.clone(), - filter_pool: filter_pool.clone(), - fee_history_limit: FEE_HISTORY_LIMIT, - fee_history_cache: fee_history_cache.clone(), - block_data_cache: block_data_cache.clone(), - overrides: overrides.clone(), - }; - - let io = crate::evm_rpc::create_full(deps, subscription) - .map_err::(Into::into)?; - - Ok(io) - }) - }; - - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - rpc_builder: rpc_extensions_builder, - backend, - system_rpc_tx, - config, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if role.is_authority() { - let env = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|x| x.handle()), - ); - - let target_gas_price = 1; - let commands_stream = transaction_pool - .pool() - .clone() - .validated_pool() - .import_notification_stream() - .map( - |_| sc_consensus_manual_seal::rpc::EngineCommand::SealNewBlock { - create_empty: true, - finalize: true, - parent_hash: None, - sender: None, - }, - ); - let client_for_cidp = client.clone(); - - // Background authorship future - let authorship_future = manual_seal::run_manual_seal(manual_seal::ManualSealParams { - block_import, - env, - client: client.clone(), - pool: transaction_pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: None, - create_inherent_data_providers: move |block: Hash, _| { - let current_para_block = client_for_cidp - .number(block) - .expect("Header lookup should succeed") - .expect("Header passed in as parent should be present in backend."); - - async move { - let dynamic_fee = - fp_dynamic_fee::InherentDataProvider(sp_core::U256::from(target_gas_price)); - - let mocked_parachain = MockValidationDataInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - para_blocks_per_relay_epoch: 10, - relay_randomness_config: (), - xcm_config: Default::default(), - raw_downward_messages: vec![], - raw_horizontal_messages: vec![], - }; - - Ok(( - 
sp_timestamp::InherentDataProvider::from_system_time(), - mocked_parachain, - dynamic_fee, - )) - } - }, - }); - // we spawn the future on a background thread managed by service. - task_manager.spawn_essential_handle().spawn_blocking( - "instant-seal", - None, - authorship_future, - ); - } - log::info!("Manual Seal Ready"); - - network_starter.start_network(); - Ok(task_manager) -} diff --git a/node/parallel/src/main.rs b/node/parallel/src/main.rs index 108b7bdf5..393e32f05 100644 --- a/node/parallel/src/main.rs +++ b/node/parallel/src/main.rs @@ -22,11 +22,6 @@ mod client; mod command; mod rpc; -#[cfg(feature = "with-evm-runtime")] -mod evm_rpc; -#[cfg(feature = "with-evm-runtime")] -mod evm_service; - fn main() -> sc_cli::Result<()> { command::run() } diff --git a/node/parallel/src/rpc.rs b/node/parallel/src/rpc.rs index b6b8154c4..734ad495a 100644 --- a/node/parallel/src/rpc.rs +++ b/node/parallel/src/rpc.rs @@ -1,67 +1,267 @@ -#![warn(missing_docs)] -#![allow(dead_code, unused)] +// Copyright 2021 Parallel Finance Developer. +// This file is part of Parallel Finance. -use std::sync::Arc; +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(dead_code, unused)] -use primitives::{AccountId, Balance, Block, CurrencyId, DataProviderId, Index, TimeStampedPrice}; -pub use sc_rpc_api::DenyUnsafe; +use fc_db::DatabaseSource; +use fc_rpc::{ + Eth, EthApiServer, EthBlockDataCacheTask, EthFilter, EthFilterApiServer, EthPubSub, + EthPubSubApiServer, Net, NetApiServer, OverrideHandle, RuntimeApiStorageOverride, + SchemaV1Override, SchemaV2Override, SchemaV3Override, StorageOverride, Web3, Web3ApiServer, +}; +use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; +use fp_storage::EthereumStorageSchema; +use jsonrpsee::RpcModule; +use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; +use primitives::*; +use sc_client_api::{AuxStore, Backend, BlockchainEvents, StateBackend, StorageProvider}; +use sc_network::NetworkService; +pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; +use sc_service::{ + error::Error as ServiceError, BasePath, ChainSpec, Configuration, PartialComponents, + TFullBackend, TFullClient, TaskManager, +}; +use sc_transaction_pool::{ChainApi, Pool}; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; - -/// substrate rpc -use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; +use sp_blockchain::{ + Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, +}; +use sp_runtime::traits::BlakeTwo256; +use std::collections::BTreeMap; +use std::sync::Arc; use substrate_frame_rpc_system::{System, SystemApiServer}; -/// orml rpc use orml_oracle_rpc::{Oracle, OracleApiServer}; - -/// parallel rpc use pallet_loans_rpc::{Loans, LoansApiServer}; use pallet_router_rpc::{Router, RouterApiServer}; -/// A type representing all RPC extensions. 
-pub type RpcExtension = jsonrpsee::RpcModule<()>; +pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf { + let config_dir = config + .base_path + .as_ref() + .map(|base_path| base_path.config_dir(config.chain_spec.id())) + .unwrap_or_else(|| { + BasePath::from_project("", "", "parallel").config_dir(config.chain_spec.id()) + }); + config_dir.join("frontier").join(path) +} + +pub fn open_frontier_backend( + client: Arc, + config: &Configuration, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend, +{ + Ok(Arc::new(fc_db::Backend::::new( + client, + &fc_db::DatabaseSettings { + source: match config.database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(config, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { + path: frontier_database_dir(config, "paritydb"), + }, + DatabaseSource::Auto { .. } => DatabaseSource::Auto { + rocksdb_path: frontier_database_dir(config, "db"), + paritydb_path: frontier_database_dir(config, "paritydb"), + cache_size: 0, + }, + _ => { + return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()) + } + }, + }, + )?)) +} + +pub fn overrides_handle(client: Arc) -> Arc> +where + C: ProvideRuntimeApi + StorageProvider + AuxStore, + C: HeaderBackend + HeaderMetadata, + C: Send + Sync + 'static, + C::Api: fp_rpc::EthereumRuntimeRPCApi, + BE: Backend + 'static, + BE::State: StateBackend, +{ + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V1, + Box::new(SchemaV1Override::new(client.clone())) + as Box + Send + Sync>, + ); + overrides_map.insert( + EthereumStorageSchema::V2, + Box::new(SchemaV2Override::new(client.clone())) + as Box + Send + Sync>, + ); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) + as Box + Send + Sync>, + ); + + Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(RuntimeApiStorageOverride::new(client)), + }) +} -/// Full client dependencies. -pub struct FullDeps { +/// Full client dependencies +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. pub pool: Arc
<P>
, + /// Graph pool instance. + pub graph: Arc>, + /// Network service + pub network: Arc>, /// Whether to deny unsafe calls pub deny_unsafe: DenyUnsafe, + /// The Node authority flag + pub is_authority: bool, + /// Frontier Backend. + pub frontier_backend: Arc>, + /// EthFilterApi pool. + pub filter_pool: FilterPool, + /// Maximum fee history cache size. + pub fee_history_limit: u64, + /// Fee history cache. + pub fee_history_cache: FeeHistoryCache, + /// Ethereum data access overrides. + pub overrides: Arc>, + /// Cache for Ethereum block data. + pub block_data_cache: Arc>, } -/// Instantiate all full RPC extensions. -pub fn create_full( - deps: FullDeps, -) -> Result> +/// Instantiate all RPC extensions. +pub fn create_full( + deps: FullDeps, + subscription_task_executor: SubscriptionTaskExecutor, +) -> Result, Box> where - C: ProvideRuntimeApi, - C: HeaderBackend + HeaderMetadata + 'static, - C: Send + Sync + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: orml_oracle_rpc::OracleRuntimeApi, - C::Api: pallet_loans_rpc::LoansRuntimeApi, - C::Api: pallet_router_rpc::RouterRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + 'static, + C: ProvideRuntimeApi + + HeaderBackend + + AuxStore + + StorageProvider + + HeaderMetadata + + BlockchainEvents + + Send + + Sync + + 'static, + C: sc_client_api::BlockBackend, + C::Api: substrate_frame_rpc_system::AccountNonceApi + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + BlockBuilder + + orml_oracle_rpc::OracleRuntimeApi + + pallet_loans_rpc::LoansRuntimeApi + + pallet_router_rpc::RouterRuntimeApi + + fp_rpc::ConvertTransactionRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi, + P: TransactionPool + Sync + Send + 'static, + BE: Backend + 'static, + BE::State: StateBackend, + BE::Blockchain: BlockchainBackend, + A: ChainApi + 'static, { - let mut module = RpcExtension::new(()); + let mut io = RpcModule::new(()); let FullDeps { client, pool, + graph, + network, deny_unsafe, + is_authority, + frontier_backend, + filter_pool, + fee_history_limit, + fee_history_cache, + overrides, + block_data_cache, } = deps; - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(Oracle::new(client.clone()).into_rpc())?; - module.merge(Loans::new(client.clone()).into_rpc())?; - module.merge(Router::new(client.clone()).into_rpc())?; + io.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + io.merge(TransactionPayment::new(client.clone()).into_rpc())?; + + // let no_tx_converter: Option = None; + enum Never {} + impl fp_rpc::ConvertTransaction for Never { + fn convert_transaction(&self, _transaction: pallet_ethereum::Transaction) -> T { + // The Never type is not instantiable, but this method requires the type to be + // instantiated to be called (`&self` parameter), so if the code compiles we have the + // guarantee that this function will never be called. 
+ unreachable!() + } + } + let convert_transaction: Option = None; + + io.merge( + Eth::new( + Arc::clone(&client), + Arc::clone(&pool), + graph.clone(), + convert_transaction, + Arc::clone(&network), + Default::default(), + Arc::clone(&overrides), + Arc::clone(&frontier_backend), + is_authority, + Arc::clone(&block_data_cache), + fee_history_cache, + fee_history_limit, + 1, + ) + .into_rpc(), + )?; + + let max_past_logs: u32 = 10_000; + let max_stored_filters: usize = 500; + io.merge( + EthFilter::new( + client.clone(), + frontier_backend, + filter_pool, + max_stored_filters, + max_past_logs, + block_data_cache, + ) + .into_rpc(), + )?; + + io.merge(Net::new(Arc::clone(&client), network.clone(), true).into_rpc())?; + + io.merge(Web3::new(Arc::clone(&client)).into_rpc())?; + + io.merge( + EthPubSub::new( + pool, + Arc::clone(&client), + network, + subscription_task_executor, + overrides, + ) + .into_rpc(), + )?; + + io.merge(Oracle::new(client.clone()).into_rpc())?; + io.merge(Loans::new(client.clone()).into_rpc())?; + io.merge(Router::new(client.clone()).into_rpc())?; - Ok(module) + Ok(io) } diff --git a/node/parallel/src/service.rs b/node/parallel/src/service.rs index 2b35b71ab..d16399c25 100644 --- a/node/parallel/src/service.rs +++ b/node/parallel/src/service.rs @@ -17,22 +17,31 @@ use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_service::{ prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, }; +use cumulus_primitives_parachain_inherent::MockValidationDataInherentDataProvider; use polkadot_service::{CollatorPair, ConstructRuntimeApi}; +use sc_consensus_manual_seal::{self as manual_seal}; use sc_executor::NativeElseWasmExecutor; use sc_network_common::service::NetworkBlock; +use sc_service::error::Error as ServiceError; use sc_service::{Configuration, PartialComponents, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}; use primitives::*; -use std::{sync::Arc, time::Duration}; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; use cumulus_client_cli::CollatorOptions; use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; +use fc_consensus::FrontierBlockImport; +use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; +use futures::StreamExt; +use sc_client_api::BlockchainEvents; +use sp_blockchain::HeaderBackend; + pub use sc_executor::NativeExecutionDispatch; // Native executor instance. 
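Note on the `Never` converter used in `create_full` above: the trick relies on an uninhabited enum. A type with no variants can implement a trait whose methods take `&self`, because no value of that type can ever be constructed, so the method bodies are provably unreachable while `None::<Never>` still satisfies the generic `Option` parameter. A minimal standalone sketch of the pattern follows; `Convert` and `new_eth_api` are hypothetical stand-ins for illustration, not the actual Frontier `ConvertTransaction` trait or `Eth::new` signature.

// Standalone illustration only; names are stand-ins, not Frontier APIs.
trait Convert<T> {
    fn convert(&self, value: u32) -> T;
}

// An enum with no variants: no value of `Never` can ever be constructed.
enum Never {}

impl<T> Convert<T> for Never {
    fn convert(&self, _value: u32) -> T {
        // `&self` can never exist, so this body is unreachable by construction.
        unreachable!()
    }
}

// A consumer that optionally accepts a converter, analogous to passing
// `convert_transaction: Option<Never>` into the Eth RPC constructor above.
fn new_eth_api<C: Convert<String>>(converter: Option<C>) -> bool {
    converter.is_some()
}

fn main() {
    // `None::<Never>` type-checks the generic parameter while guaranteeing
    // that `convert` will never actually run.
    assert!(!new_eth_api(None::<Never>));
}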
@@ -62,9 +71,36 @@ impl sc_executor::NativeExecutionDispatch for HeikoExecutor { } } +pub struct VanillaExecutor; +impl sc_executor::NativeExecutionDispatch for VanillaExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + vanilla_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + vanilla_runtime::native_version() + } +} + +pub struct KerriaExecutor; +impl sc_executor::NativeExecutionDispatch for KerriaExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + kerria_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + kerria_runtime::native_version() + } +} + pub type FullBackend = sc_service::TFullBackend; pub type FullClient = sc_service::TFullClient>; +pub type FullSelectChain = sc_consensus::LongestChain; pub trait IdentifyVariant { fn is_parallel(&self) -> bool; @@ -113,7 +149,11 @@ pub fn new_partial( (), sc_consensus::DefaultImportQueue>, sc_transaction_pool::FullPool>, - (Option, Option), + ( + Option, + Option, + Arc>, + ), >, sc_service::Error, > @@ -167,6 +207,10 @@ where task_manager.spawn_essential_handle(), client.clone(), ); + let frontier_backend = crate::rpc::open_frontier_backend(client.clone(), config)?; + let frontier_block_import = + FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone()); + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let import_queue = cumulus_client_consensus_aura::import_queue::< @@ -177,7 +221,7 @@ where _, _, >(cumulus_client_consensus_aura::ImportQueueParams { - block_import: client.clone(), + block_import: frontier_block_import, client: client.clone(), create_inherent_data_providers: move |_, _| async move { let time = sp_timestamp::InherentDataProvider::from_system_time(); @@ -203,7 +247,7 @@ where task_manager, transaction_pool, select_chain: (), - other: (telemetry, telemetry_worker_handle), + other: (telemetry, telemetry_worker_handle, frontier_backend), }; Ok(params) @@ -255,7 +299,7 @@ where let parachain_config = prepare_node_config(parachain_config); let params = new_partial(¶chain_config)?; - let (mut telemetry, telemetry_worker_handle) = params.other; + let (mut telemetry, telemetry_worker_handle, frontier_backend) = params.other; let client = params.client.clone(); let backend = params.backend.clone(); @@ -295,18 +339,87 @@ where warp_sync: None, })?; + let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new())); + let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new())); + let overrides = crate::rpc::overrides_handle(client.clone()); + + // Frontier offchain DB task. Essential. + // Maps emulated ethereum data to substrate native data. + task_manager.spawn_essential_handle().spawn( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::MappingSyncWorker::new( + client.import_notification_stream(), + Duration::new(6, 0), + client.clone(), + backend.clone(), + frontier_backend.clone(), + 3, + 0, + fc_mapping_sync::SyncStrategy::Parachain, + ) + .for_each(|()| futures::future::ready(())), + ); + + // Frontier `EthFilterApi` maintenance. Manages the pool of user-created Filters. + // Each filter is allowed to stay in the pool for 100 blocks. 
+ const FILTER_RETAIN_THRESHOLD: u64 = 100; + task_manager.spawn_essential_handle().spawn( + "frontier-filter-pool", + Some("frontier"), + fc_rpc::EthTask::filter_pool_task( + client.clone(), + filter_pool.clone(), + FILTER_RETAIN_THRESHOLD, + ), + ); + + const FEE_HISTORY_LIMIT: u64 = 2048; + task_manager.spawn_essential_handle().spawn( + "frontier-fee-history", + Some("frontier"), + fc_rpc::EthTask::fee_history_task( + client.clone(), + overrides.clone(), + fee_history_cache.clone(), + FEE_HISTORY_LIMIT, + ), + ); + + let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( + task_manager.spawn_handle(), + overrides.clone(), + 50, + 50, + prometheus_registry.clone(), + )); + let rpc_builder = { let client = client.clone(); - let pool = transaction_pool.clone(); + let network = network.clone(); + let transaction_pool = transaction_pool.clone(); + let frontier_backend = frontier_backend.clone(); + let overrides = overrides.clone(); + let fee_history_cache = fee_history_cache.clone(); + let block_data_cache = block_data_cache.clone(); - Box::new(move |deny_unsafe, _| { + Box::new(move |deny_unsafe, subscription| { let deps = crate::rpc::FullDeps { client: client.clone(), - pool: pool.clone(), + pool: transaction_pool.clone(), + graph: transaction_pool.pool().clone(), + network: network.clone(), + is_authority: validator, deny_unsafe, + frontier_backend: frontier_backend.clone(), + filter_pool: filter_pool.clone(), + fee_history_limit: FEE_HISTORY_LIMIT, + fee_history_cache: fee_history_cache.clone(), + block_data_cache: block_data_cache.clone(), + overrides: overrides.clone(), }; - crate::rpc::create_full(deps).map_err(Into::into) + crate::rpc::create_full(deps, subscription).map_err(Into::into) }) }; @@ -325,7 +438,7 @@ where keystore: params.keystore_container.sync_keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), - rpc_builder: Box::new(rpc_builder), + rpc_builder, backend: backend.clone(), system_rpc_tx, tx_handler_controller, @@ -452,3 +565,320 @@ where { start_node_impl(parachain_config, polkadot_config, collator_options, id).await } + +/// Build a partial chain component config +pub fn new_dev_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue>, + sc_transaction_pool::FullPool>, + ( + FrontierBlockImport< + Block, + Arc>, + FullClient, + >, + Option, + Arc>, + ), + >, + ServiceError, +> +where + RuntimeApi: + ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: crate::client::RuntimeApiCollection< + StateBackend = sc_client_api::StateBackendFor, + >, + Executor: NativeExecutionDispatch + 'static, +{ + if config.keystore_remote.is_some() { + return Err(ServiceError::Other( + "Remote Keystores are not supported.".to_string(), + )); + } + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + let executor = sc_executor::NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + config.runtime_cache_size, + ); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = 
Arc::new(client); + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager + .spawn_handle() + .spawn("telemetry", None, worker.run()); + telemetry + }); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + let frontier_backend = crate::rpc::open_frontier_backend(client.clone(), config)?; + + let frontier_block_import = + FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone()); + + let import_queue = sc_consensus_manual_seal::import_queue( + Box::new(frontier_block_import.clone()), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ); + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (frontier_block_import, telemetry, frontier_backend), + }) +} + +/// Builds a new service. +pub fn start_dev_node( + config: Configuration, +) -> Result +where + RuntimeApi: + ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: crate::client::RuntimeApiCollection< + StateBackend = sc_client_api::StateBackendFor, + >, + Executor: NativeExecutionDispatch + 'static, +{ + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (block_import, mut telemetry, frontier_backend), + } = new_dev_partial::(&config)?; + + let (network, system_rpc_tx, tx_handler_controller, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + } + + let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new())); + let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new())); + let overrides = crate::rpc::overrides_handle(client.clone()); + + // Frontier offchain DB task. Essential. + // Maps emulated ethereum data to substrate native data. + task_manager.spawn_essential_handle().spawn( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::MappingSyncWorker::new( + client.import_notification_stream(), + Duration::new(6, 0), + client.clone(), + backend.clone(), + frontier_backend.clone(), + 3, + 0, + fc_mapping_sync::SyncStrategy::Parachain, + ) + .for_each(|()| futures::future::ready(())), + ); + + // Frontier `EthFilterApi` maintenance. Manages the pool of user-created Filters. + // Each filter is allowed to stay in the pool for 100 blocks. 
+ const FILTER_RETAIN_THRESHOLD: u64 = 100; + task_manager.spawn_essential_handle().spawn( + "frontier-filter-pool", + Some("frontier"), + fc_rpc::EthTask::filter_pool_task( + client.clone(), + filter_pool.clone(), + FILTER_RETAIN_THRESHOLD, + ), + ); + + const FEE_HISTORY_LIMIT: u64 = 2048; + task_manager.spawn_essential_handle().spawn( + "frontier-fee-history", + Some("frontier"), + fc_rpc::EthTask::fee_history_task( + client.clone(), + overrides.clone(), + fee_history_cache.clone(), + FEE_HISTORY_LIMIT, + ), + ); + + let role = config.role.clone(); + let prometheus_registry = config.prometheus_registry().cloned(); + let is_authority = config.role.is_authority(); + + let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( + task_manager.spawn_handle(), + overrides.clone(), + 50, + 50, + prometheus_registry.clone(), + )); + + let rpc_extensions_builder = { + let client = client.clone(); + let network = network.clone(); + let transaction_pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, subscription| { + let deps = crate::rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + graph: transaction_pool.pool().clone(), + network: network.clone(), + is_authority, + deny_unsafe, + frontier_backend: frontier_backend.clone(), + filter_pool: filter_pool.clone(), + fee_history_limit: FEE_HISTORY_LIMIT, + fee_history_cache: fee_history_cache.clone(), + block_data_cache: block_data_cache.clone(), + overrides: overrides.clone(), + }; + + let io = crate::rpc::create_full(deps, subscription) + .map_err::(Into::into)?; + + Ok(io) + }) + }; + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + config, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let env = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let target_gas_price = 1; + let commands_stream = transaction_pool + .pool() + .clone() + .validated_pool() + .import_notification_stream() + .map( + |_| sc_consensus_manual_seal::rpc::EngineCommand::SealNewBlock { + create_empty: true, + finalize: true, + parent_hash: None, + sender: None, + }, + ); + let client_for_cidp = client.clone(); + + // Background authorship future + let authorship_future = manual_seal::run_manual_seal(manual_seal::ManualSealParams { + block_import, + env, + client: client.clone(), + pool: transaction_pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: move |block: Hash, _| { + let current_para_block = client_for_cidp + .number(block) + .expect("Header lookup should succeed") + .expect("Header passed in as parent should be present in backend."); + + async move { + let dynamic_fee = + fp_dynamic_fee::InherentDataProvider(sp_core::U256::from(target_gas_price)); + + let mocked_parachain = MockValidationDataInherentDataProvider { + current_para_block, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + para_blocks_per_relay_epoch: 10, + relay_randomness_config: (), + xcm_config: Default::default(), + raw_downward_messages: vec![], + raw_horizontal_messages: vec![], + }; + + Ok(( + 
sp_timestamp::InherentDataProvider::from_system_time(), + mocked_parachain, + dynamic_fee, + )) + } + }, + }); + // we spawn the future on a background thread managed by service. + task_manager.spawn_essential_handle().spawn_blocking( + "instant-seal", + None, + authorship_future, + ); + } + log::info!("Manual Seal Ready"); + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/runtime/heiko/Cargo.toml b/runtime/heiko/Cargo.toml index 280a819bc..cd73b5d1a 100644 --- a/runtime/heiko/Cargo.toml +++ b/runtime/heiko/Cargo.toml @@ -87,6 +87,20 @@ orml-xcm = { version = '0.4.1-dev', default-features = false orml-xcm-support = { version = '0.4.1-dev', default-features = false } orml-xtokens = { version = '0.4.1-dev', default-features = false } +# Frontier dependencies +fp-rpc = { version = '3.0.0-dev', default-features = false } +fp-self-contained = { version = '1.0.0-dev', default-features = false } +pallet-base-fee = { version = '1.0.0', default-features = false } +pallet-ethereum = { version = '4.0.0-dev', default-features = false } +pallet-evm = { version = '6.0.0-dev', default-features = false } +pallet-evm-precompile-blake2 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-bn128 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-dispatch = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-ed25519 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-modexp = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-sha3fips = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-simple = { version = '2.0.0-dev', default-features = false } + # Parallel dependencies pallet-amm = { path = '../../pallets/amm', default-features = false } pallet-asset-registry = { path = '../../pallets/asset-registry', default-features = false } @@ -94,6 +108,9 @@ pallet-bridge = { path = '../../pallets/bridge', default-feature pallet-crowdloans = { path = '../../pallets/crowdloans', default-features = false } pallet-currency-adapter = { path = '../../pallets/currency-adapter', default-features = false } pallet-emergency-shutdown = { path = '../../pallets/emergency-shutdown', default-features = false } +pallet-evm-precompile-assets-erc20 = { path = '../../precompiles/assets-erc20', default-features = false } +pallet-evm-precompile-balances-erc20 = { path = '../../precompiles/balances-erc20', default-features = false } +pallet-evm-signatures = { path = '../../pallets/evm-signatures', default-features = false } pallet-farming = { path = '../../pallets/farming', default-features = false } pallet-liquid-staking = { path = '../../pallets/liquid-staking', default-features = false } pallet-loans = { path = '../../pallets/loans', default-features = false } @@ -121,22 +138,23 @@ runtime-benchmarks = [ 'frame-support/runtime-benchmarks', 'frame-system-benchmarking/runtime-benchmarks', 'frame-system/runtime-benchmarks', + 'pallet-ethereum/runtime-benchmarks', 'pallet-bridge/runtime-benchmarks', 'pallet-balances/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', - 'pallet-collective/runtime-benchmarks', - 'pallet-crowdloans/runtime-benchmarks', 'pallet-loans/runtime-benchmarks', 'pallet-liquid-staking/runtime-benchmarks', + 'pallet-asset-registry/runtime-benchmarks', + 'pallet-amm/runtime-benchmarks', + 'pallet-collective/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'xcm-builder/runtime-benchmarks', 'pallet-multisig/runtime-benchmarks', 
'pallet-membership/runtime-benchmarks', - 'pallet-amm/runtime-benchmarks', 'pallet-router/runtime-benchmarks', - 'pallet-xcm-helper/runtime-benchmarks', 'pallet-farming/runtime-benchmarks', - 'pallet-asset-registry/runtime-benchmarks', + 'pallet-crowdloans/runtime-benchmarks', + 'pallet-xcm-helper/runtime-benchmarks', 'pallet-streaming/runtime-benchmarks', 'pallet-assets/runtime-benchmarks', 'pallet-collator-selection/runtime-benchmarks', @@ -154,6 +172,19 @@ std = [ 'codec/std', 'serde', 'scale-info/std', + 'fp-rpc/std', + 'fp-self-contained/std', + 'pallet-base-fee/std', + 'pallet-ethereum/std', + 'pallet-evm/std', + 'pallet-evm-precompile-blake2/std', + 'pallet-evm-precompile-simple/std', + 'pallet-evm-precompile-bn128/std', + 'pallet-evm-precompile-dispatch/std', + 'pallet-evm-precompile-ed25519/std', + 'pallet-evm-precompile-modexp/std', + 'pallet-evm-precompile-sha3fips/std', + 'pallet-evm-signatures/std', 'sp-api/std', 'sp-std/std', 'sp-core/std', @@ -171,7 +202,6 @@ std = [ 'frame-benchmarking/std', 'frame-system-rpc-runtime-api/std', 'pallet-balances/std', - 'pallet-crowdloans/std', 'pallet-xcm/std', 'pallet-timestamp/std', 'pallet-transaction-payment/std', @@ -220,12 +250,16 @@ std = [ 'orml-xcm/std', 'orml-vesting/std', 'pallet-amm/std', + 'pallet-crowdloans/std', 'pallet-router/std', + 'pallet-currency-adapter/std', + 'pallet-farming/std', 'pallet-emergency-shutdown/std', 'pallet-xcm-helper/std', - 'pallet-farming/std', 'pallet-asset-registry/std', 'pallet-traits/std', + 'pallet-evm-precompile-assets-erc20/std', + 'pallet-evm-precompile-balances-erc20/std', ] try-runtime = [ 'frame-support/try-runtime', @@ -273,6 +307,11 @@ try-runtime = [ 'pallet-liquid-staking/try-runtime', 'pallet-streaming/try-runtime', 'pallet-prices/try-runtime', + 'pallet-crowdloans/try-runtime', 'pallet-xcm-helper/try-runtime', 'pallet-asset-registry/try-runtime', + 'pallet-ethereum/try-runtime', + 'pallet-evm/try-runtime', + 'pallet-base-fee/try-runtime', + 'pallet-evm-signatures/try-runtime', ] diff --git a/runtime/heiko/src/lib.rs b/runtime/heiko/src/lib.rs index 112528eff..3e923b930 100644 --- a/runtime/heiko/src/lib.rs +++ b/runtime/heiko/src/lib.rs @@ -25,17 +25,17 @@ use frame_support::{ fungibles::{InspectMetadata, Mutate}, tokens::BalanceConversion, ChangeMembers, ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly, Everything, - InstanceFilter, Nothing, + FindAuthor, InstanceFilter, Nothing, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, ConstantMultiplier, }, - PalletId, WeakBoundedVec, + ConsensusEngineId, PalletId, WeakBoundedVec, }; use frame_system::{ limits::{BlockLength, BlockWeights}, - EnsureRoot, + EnsureRoot, EnsureSigned, }; use orml_traits::{ location::AbsoluteReserveProvider, parameter_type_with_key, DataFeeder, DataProvider, @@ -47,16 +47,19 @@ use polkadot_parachain::primitives::Sibling; use polkadot_runtime_common::SlowAdjustingFeeUpdate; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +use sp_core::{OpaqueMetadata, H160, H256, U256}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ self, AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, - BlockNumberProvider, Convert, Zero, + BlockNumberProvider, Convert, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + UniqueSaturatedInto, Verify, Zero, + }, + transaction_validity::{ + TransactionPriority, TransactionSource, 
TransactionValidity, TransactionValidityError, }, - transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, FixedPointNumber, KeyTypeId, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; @@ -96,6 +99,7 @@ pub use pallet_prices; pub use pallet_router; pub use pallet_streaming; +use pallet_evm::{FeeCalculator, Runner}; use pallet_traits::{ xcm::{ AccountIdToMultiLocation, AsAssetType, AssetType, CurrencyIdConvert, FirstAssetTrader, @@ -112,6 +116,38 @@ use primitives::{ KSM_U, }; +use pallet_evm_precompile_balances_erc20::Erc20Metadata; + +mod precompiles; +use pallet_evm_precompile_assets_erc20::AddressToAssetId; +pub use precompiles::{ParallelPrecompiles, ASSET_PRECOMPILE_ADDRESS_PREFIX}; + +pub struct NativeErc20Metadata; + +/// ERC20 metadata for the native token. +impl Erc20Metadata for NativeErc20Metadata { + /// Returns the name of the token. + fn name() -> &'static str { + "HKO token" + } + + /// Returns the symbol of the token. + fn symbol() -> &'static str { + "HKO" + } + + /// Returns the decimals places of the token. + fn decimals() -> u8 { + 12 + } + + /// Must return `true` only if it represents the main native currency of + /// the network. It must be the currency used in `pallet_evm`. + fn is_native_currency() -> bool { + true + } +} + // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -239,22 +275,22 @@ impl Contains for WhiteListFilter { RuntimeCall::CumulusXcm(_) | // Consensus RuntimeCall::Authorship(_) | - RuntimeCall::Session(_) | // Call::CollatorSelection(_) | + RuntimeCall::Session(_) | // Utility + RuntimeCall::Multisig(_) | RuntimeCall::Utility(_) | - RuntimeCall::Multisig(_) | RuntimeCall::Proxy(_) | RuntimeCall::Identity(_) | RuntimeCall::EmergencyShutdown(_) | RuntimeCall::XcmHelper(_) | // Membership - RuntimeCall::OracleMembership(_) | RuntimeCall::GeneralCouncilMembership(_) | RuntimeCall::TechnicalCommitteeMembership(_) | - RuntimeCall::LiquidStakingAgentsMembership(_) | + RuntimeCall::OracleMembership(_) | + RuntimeCall::BridgeMembership(_) | RuntimeCall::CrowdloansAutomatorsMembership(_) | - RuntimeCall::BridgeMembership(_) + RuntimeCall::LiquidStakingAgentsMembership(_) ) } } @@ -278,20 +314,20 @@ impl Contains for BaseCallFilter { RuntimeCall::Assets(pallet_assets::Call::destroy { .. }) | RuntimeCall::CurrencyAdapter(_) | // 3rd Party - RuntimeCall::Vesting(_) | RuntimeCall::Oracle(_) | RuntimeCall::XTokens(_) | RuntimeCall::OrmlXcm(_) | + RuntimeCall::Vesting(_) | // Loans RuntimeCall::Loans(_) | RuntimeCall::Prices(_) | + // LiquidStaking + RuntimeCall::LiquidStaking(_) | // AMM RuntimeCall::AMM(_) | RuntimeCall::AMMRoute(_) | // Crowdloans RuntimeCall::Crowdloans(_) | - // Liquid Staking - RuntimeCall::LiquidStaking(_) | // Bridge RuntimeCall::Bridge(_) | // Farming @@ -299,7 +335,12 @@ impl Contains for BaseCallFilter { // Streaming RuntimeCall::Streaming(_) | // Asset Management - RuntimeCall::AssetRegistry(_) + RuntimeCall::AssetRegistry(_) | + // EVM + RuntimeCall::EVM(_) | + RuntimeCall::Ethereum(_) | + RuntimeCall::BaseFee(_) | + RuntimeCall::EVMSignatureCall(_) )) && EmergencyShutdown::contains(call) } @@ -358,7 +399,7 @@ impl frame_system::Config for Runtime { } parameter_types! { - pub TreasuryAccount: AccountId = TreasuryPalletId::get().into_account_truncating(); + pub TreasuryAccount: AccountId = TreasuryPalletId::get().into_account_truncating(); } impl orml_xcm::Config for Runtime { @@ -417,6 +458,26 @@ parameter_types! 
{ pub const MetadataDepositPerByte: Balance = deposit(0, 1); } +impl AddressToAssetId for Runtime { + fn address_to_asset_id(address: H160) -> Option { + let mut data = [0u8; 4]; + let address_bytes: [u8; 20] = address.into(); + if ASSET_PRECOMPILE_ADDRESS_PREFIX.eq(&address_bytes[0..4]) { + data.copy_from_slice(&address_bytes[16..20]); + Some(u32::from_be_bytes(data)) + } else { + None + } + } + + fn asset_id_to_address(asset_id: CurrencyId) -> H160 { + let mut data = [0u8; 20]; + data[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX); + data[16..20].copy_from_slice(&asset_id.to_be_bytes()); + H160::from(data) + } +} + impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -477,19 +538,19 @@ impl pallet_liquid_staking::Config for Runtime { type RuntimeCall = RuntimeCall; type PalletId = StakingPalletId; type LoansPalletId = LoansPalletId; + type RelayOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; + type UpdateOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; type WeightInfo = weights::pallet_liquid_staking::WeightInfo; type SelfParaId = ParachainInfo; type Assets = CurrencyAdapter; - type RelayOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; - type UpdateOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; + type StakingCurrency = StakingCurrency; + type LiquidCurrency = LiquidCurrency; + type CollateralCurrency = CollateralCurrency; type DerivativeIndexList = DerivativeIndexList; type DistributionStrategy = pallet_liquid_staking::distribution::MaxMinDistribution; type XcmFees = XcmFees; type LoansInstantUnstakeFee = LoansInstantUnstakeFee; type MatchingPoolFastUnstakeFee = MatchingPoolFastUnstakeFee; - type StakingCurrency = StakingCurrency; - type LiquidCurrency = LiquidCurrency; - type CollateralCurrency = CollateralCurrency; type EraLength = EraLength; type MinStake = MinStake; type MinUnstake = MinUnstake; @@ -710,6 +771,119 @@ impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; } +parameter_types! { + pub DefaultElasticity: Permill = Permill::zero(); + pub DefaultBaseFeePerGas: U256 = (10_000_000).into(); +} + +pub struct BaseFeeThreshold; +impl pallet_base_fee::BaseFeeThreshold for BaseFeeThreshold { + fn lower() -> Permill { + Permill::zero() + } + fn ideal() -> Permill { + Permill::from_parts(500_000) + } + fn upper() -> Permill { + Permill::from_parts(1_000_000) + } +} + +impl pallet_base_fee::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Threshold = BaseFeeThreshold; + type DefaultElasticity = DefaultElasticity; + type DefaultBaseFeePerGas = DefaultBaseFeePerGas; +} + +/// Current approximation of the gas/s consumption considering +/// EVM execution over compiled WASM (on 4.4Ghz CPU). +/// Given the 500ms Weight, from which 75% only are used for transactions, +/// the total EVM execution gas limit is: GAS_PER_SECOND * 0.500 * 0.75 ~= 15_000_000. +pub const GAS_PER_SECOND: u64 = 40_000_000; + +/// Approximate ratio of the amount of Weight per Gas. +/// u64 works for approximations because Weight is a very small unit compared to gas. 
+pub const WEIGHT_PER_GAS: u64 = WEIGHT_PER_SECOND.ref_time() / GAS_PER_SECOND; + +pub struct FindAuthorTruncated(sp_std::marker::PhantomData); +impl> FindAuthor for FindAuthorTruncated { + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + if let Some(author_index) = F::find_author(digests) { + let authority_id = Aura::authorities()[author_index as usize].clone(); + return Some(H160::from_slice(&authority_id.encode()[4..24])); + } + + None + } +} + +pub type ParallelPrecompilesType = ParallelPrecompiles; + +parameter_types! { + /// Ethereum-compatible chain_id: + /// * Heiko: 2085 + pub EVMChainId: u64 = 2085; + pub CallMagicNumber: u16 = EVMChainId::get() as u16; + /// EVM gas limit + pub BlockGasLimit: U256 = U256::from( + NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT.ref_time() / WEIGHT_PER_GAS + ); + pub ParallelPrecompilesValue: ParallelPrecompilesType = ParallelPrecompiles::::new(); + pub WeightPerGas: u64 = WEIGHT_PER_GAS; +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = BaseFee; + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type BlockHashMapping = pallet_ethereum::EthereumBlockHashMapping; + type CallOrigin = pallet_evm::EnsureAddressRoot; + type WithdrawOrigin = pallet_evm::EnsureAddressTruncated; + type AddressMapping = pallet_evm::HashedAddressMapping; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = ParallelPrecompilesType; + type PrecompilesValue = ParallelPrecompilesValue; + type ChainId = EVMChainId; + type OnChargeTransaction = pallet_evm::EVMCurrencyAdapter; + type BlockGasLimit = BlockGasLimit; + type FindAuthor = FindAuthorTruncated; + type WeightPerGas = WeightPerGas; +} + +impl pallet_ethereum::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type StateRoot = pallet_ethereum::IntermediateStateRoot; +} + +parameter_types! { + pub const EcdsaUnsignedPriority: TransactionPriority = TransactionPriority::MAX / 2; + pub const CallFee: Balance = 1 * DOLLARS / 10; + pub const VerifySignature: bool = false; +} + +impl pallet_evm_signatures::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Signature = pallet_evm_signatures::ethereum::EthereumSignature; + type Signer = ::Signer; + type CallMagicNumber = CallMagicNumber; + type Currency = Balances; + type CallFee = CallFee; + type OnChargeTransaction = Treasury; + type UnsignedPriority = EcdsaUnsignedPriority; + type WithdrawOrigin = pallet_evm::EnsureAddressTruncated; + type GetNativeCurrencyId = NativeCurrencyId; + type VerifySignature = VerifySignature; + type Assets = Assets; + type AddressMapping = pallet_evm::HashedAddressMapping; + type WeightInfo = pallet_evm_signatures::weights::SubstrateWeight; +} + #[derive( Copy, Clone, @@ -732,6 +906,7 @@ pub enum ProxyType { Streaming, Governance, AMM, + EVM, } impl Default for ProxyType { fn default() -> Self { @@ -821,6 +996,15 @@ impl InstanceFilter for ProxyType { ) ) } + ProxyType::EVM => { + matches!( + c, + RuntimeCall::Ethereum(..) 
+ | RuntimeCall::EVM(_) + | RuntimeCall::BaseFee(_) + | RuntimeCall::EVMSignatureCall(_) + ) + } } } fn is_superset(&self, o: &Self) -> bool { @@ -1549,7 +1733,7 @@ impl orml_vesting::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type MinVestedTransfer = MinVestedTransfer; - type VestedTransferOrigin = frame_system::EnsureSigned; + type VestedTransferOrigin = EnsureSigned; type WeightInfo = weights::orml_vesting::WeightInfo; type MaxVestingSchedules = MaxVestingSchedules; type BlockNumberProvider = frame_system::Pallet; @@ -1568,9 +1752,9 @@ impl pallet_amm::Config for Runtime { type Assets = CurrencyAdapter; type PalletId = AMMPalletId; type LockAccountId = OneAccount; + type AMMWeightInfo = weights::pallet_amm::WeightInfo; type CreatePoolOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; type ProtocolFeeUpdateOrigin = EnsureRootOrMoreThanHalfGeneralCouncil; - type AMMWeightInfo = weights::pallet_amm::WeightInfo; type LpFee = DefaultLpFee; type MinimumLiquidity = MinimumLiquidity; type MaxLengthRoute = MaxLengthRoute; @@ -1805,6 +1989,12 @@ construct_runtime!( Streaming: pallet_streaming::{Pallet, Call, Storage, Event} = 94, AssetRegistry: pallet_asset_registry::{Pallet, Call, Storage, Event} = 95, + // EVM + EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 110, + Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 111, + BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 112, + EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 113, + // Parachain System, always put it at the end ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned} = 20, } @@ -1833,11 +2023,12 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + fp_self_contained::UncheckedExtrinsic; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = + fp_self_contained::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1848,6 +2039,64 @@ pub type Executive = frame_executive::Executive< (), >; +impl fp_self_contained::SelfContainedCall for RuntimeCall { + type SignedInfo = H160; + + fn is_self_contained(&self) -> bool { + match self { + RuntimeCall::Ethereum(call) => call.is_self_contained(), + _ => false, + } + } + + fn check_self_contained(&self) -> Option> { + match self { + RuntimeCall::Ethereum(call) => call.check_self_contained(), + _ => None, + } + } + + fn validate_self_contained( + &self, + info: &Self::SignedInfo, + dispatch_info: &DispatchInfoOf, + len: usize, + ) -> Option { + match self { + RuntimeCall::Ethereum(call) => call.validate_self_contained(info, dispatch_info, len), + _ => None, + } + } + + fn pre_dispatch_self_contained( + &self, + info: &Self::SignedInfo, + dispatch_info: &DispatchInfoOf, + len: usize, + ) -> Option> { + match self { + RuntimeCall::Ethereum(call) => { + call.pre_dispatch_self_contained(info, dispatch_info, len) + } + _ => None, + } + } + + fn apply_self_contained( + self, + info: Self::SignedInfo, + ) -> Option>> { + match self { + call @ RuntimeCall::Ethereum(pallet_ethereum::Call::transact { .. 
}) => { + Some(call.dispatch(RuntimeOrigin::from( + pallet_ethereum::RawOrigin::EthereumTransaction(info), + ))) + } + _ => None, + } + } +} + impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { @@ -1995,6 +2244,164 @@ impl_runtime_apis! { } } + impl fp_rpc::EthereumRuntimeRPCApi for Runtime { + fn chain_id() -> u64 { + EVMChainId::get() + } + + fn account_basic(address: H160) -> pallet_evm::Account { + let (account, _) = EVM::account_basic(&address); + account + } + + fn gas_price() -> U256 { + let (gas_price, _) = ::FeeCalculator::min_gas_price(); + gas_price + } + + fn account_code_at(address: H160) -> Vec { + EVM::account_codes(address) + } + + fn author() -> H160 { + >::find_author() + } + + fn storage_at(address: H160, index: U256) -> H256 { + let mut tmp = [0u8; 32]; + index.to_big_endian(&mut tmp); + EVM::account_storages(address, H256::from_slice(&tmp[..])) + } + + fn call( + from: H160, + to: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + _access_list: Option)>>, + ) -> Result { + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + let is_transactional = false; + let validate = true; + ::Runner::call( + from, + to, + data, + value, + gas_limit.unique_saturated_into(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + Vec::new(), + is_transactional, + validate, + config + .as_ref() + .unwrap_or_else(|| ::config()), + ) + .map_err(|err| err.error.into()) + } + + fn create( + from: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + _access_list: Option)>>, + ) -> Result { + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + let is_transactional = false; + let validate = true; + #[allow(clippy::or_fun_call)] // suggestion not helpful here + ::Runner::create( + from, + data, + value, + gas_limit.unique_saturated_into(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + Vec::new(), + is_transactional, + validate, + config + .as_ref() + .unwrap_or(::config()), + ) + .map_err(|err| err.error.into()) + } + + fn current_transaction_statuses() -> Option> { + Ethereum::current_transaction_statuses() + } + + fn current_block() -> Option { + Ethereum::current_block() + } + + fn current_receipts() -> Option> { + Ethereum::current_receipts() + } + + fn current_all() -> ( + Option, + Option>, + Option>, + ) { + ( + Ethereum::current_block(), + Ethereum::current_receipts(), + Ethereum::current_transaction_statuses(), + ) + } + + fn extrinsic_filter( + xts: Vec<::Extrinsic>, + ) -> Vec { + xts.into_iter().filter_map(|xt| match xt.0.function { + RuntimeCall::Ethereum(pallet_ethereum::Call::transact { transaction }) => Some(transaction), + _ => None + }).collect::>() + } + + fn elasticity() -> Option { + Some(BaseFee::elasticity()) + } + } + + impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { + fn convert_transaction( + transaction: pallet_ethereum::Transaction + ) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_ethereum::Call::::transact { transaction }.into(), + ) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( @@ -2022,9 +2429,9 @@ 
impl_runtime_apis! { list_benchmark!(list, extra, pallet_amm, AMM); list_benchmark!(list, extra, pallet_liquid_staking, LiquidStaking); list_benchmark!(list, extra, pallet_router, AMMRoute); + list_benchmark!(list, extra, pallet_farming, Farming); list_benchmark!(list, extra, pallet_crowdloans, Crowdloans); list_benchmark!(list, extra, pallet_xcm_helper, XcmHelper); - list_benchmark!(list, extra, pallet_farming, Farming); list_benchmark!(list, extra, pallet_asset_registry, AssetRegistry); list_benchmark!(list, extra, pallet_streaming, Streaming); list_benchmark!(list, extra, pallet_assets, Assets); @@ -2081,9 +2488,9 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_amm, AMM); add_benchmark!(params, batches, pallet_liquid_staking, LiquidStaking); add_benchmark!(params, batches, pallet_router, AMMRoute); + add_benchmark!(params, batches, pallet_farming, Farming); add_benchmark!(params, batches, pallet_crowdloans, Crowdloans); add_benchmark!(params, batches, pallet_xcm_helper, XcmHelper); - add_benchmark!(params, batches, pallet_farming, Farming); add_benchmark!(params, batches, pallet_asset_registry, AssetRegistry); add_benchmark!(params, batches, pallet_streaming, Streaming); add_benchmark!(params, batches, pallet_assets, Assets); diff --git a/runtime/heiko/src/precompiles.rs b/runtime/heiko/src/precompiles.rs new file mode 100644 index 000000000..686deba1e --- /dev/null +++ b/runtime/heiko/src/precompiles.rs @@ -0,0 +1,117 @@ +// Copyright 2021 Parallel Finance Developer. +// This file is part of Parallel Finance. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::dispatch::GetDispatchInfo; +use frame_support::dispatch::PostDispatchInfo; +use pallet_evm::{ + ExitRevert, Precompile, PrecompileFailure, PrecompileHandle, PrecompileResult, PrecompileSet, +}; +use sp_core::H160; +use sp_runtime::traits::Dispatchable; +use sp_std::fmt::Debug; +use sp_std::marker::PhantomData; + +use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet}; +use pallet_evm_precompile_balances_erc20::Erc20BalancesPrecompile; +use pallet_evm_precompile_balances_erc20::Erc20Metadata; +use pallet_evm_precompile_blake2::Blake2F; +use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing}; +use pallet_evm_precompile_dispatch::Dispatch; +use pallet_evm_precompile_ed25519::Ed25519Verify; +use pallet_evm_precompile_modexp::Modexp; +use pallet_evm_precompile_sha3fips::Sha3FIPS256; +use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripemd160, Sha256}; + +/// The asset precompile address prefix. 
Addresses that match against this prefix will be routed +/// to Erc20AssetsPrecompileSet +pub const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4]; + +#[derive(Debug, Default, Clone, Copy)] +pub struct ParallelPrecompiles(PhantomData<(R, M)>); + +impl ParallelPrecompiles +where + R: pallet_evm::Config, + M: Erc20Metadata, +{ + pub fn new() -> Self { + Self(Default::default()) + } + pub fn used_addresses() -> impl Iterator { + sp_std::vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 1024, 1025, 1026, 1027, 2050] + .into_iter() + .map(hash) + } +} + +impl PrecompileSet for ParallelPrecompiles +where + Erc20AssetsPrecompileSet: PrecompileSet, + Erc20BalancesPrecompile: Precompile, + Dispatch: Precompile, + R: pallet_evm::Config + + AddressToAssetId<::AssetId> + + pallet_assets::Config + + pallet_balances::Config, + R::RuntimeCall: Dispatchable + GetDispatchInfo, + ::RuntimeCall: From>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + From::AccountId>>, + ::Balance: TryFrom, + ::Balance: Into, + ::Moment: Into, + M: Erc20Metadata, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + let address = handle.code_address(); + if self.is_precompile(address) && address > hash(9) && handle.context().address != address { + return Some(Err(PrecompileFailure::Revert { + exit_status: ExitRevert::Reverted, + output: b"cannot be called with DELEGATECALL or CALLCODE".to_vec(), + })); + } + match address { + // Ethereum precompiles : + a if a == hash(1) => Some(ECRecover::execute(handle)), + a if a == hash(2) => Some(Sha256::execute(handle)), + a if a == hash(3) => Some(Ripemd160::execute(handle)), + a if a == hash(4) => Some(Identity::execute(handle)), + a if a == hash(5) => Some(Modexp::execute(handle)), + a if a == hash(6) => Some(Bn128Add::execute(handle)), + a if a == hash(7) => Some(Bn128Mul::execute(handle)), + a if a == hash(8) => Some(Bn128Pairing::execute(handle)), + a if a == hash(9) => Some(Blake2F::execute(handle)), + // Non-Frontier specific nor Ethereum precompiles : + a if a == hash(1024) => Some(Sha3FIPS256::execute(handle)), + a if a == hash(1025) => Some(ECRecoverPublicKey::execute(handle)), + a if a == hash(1026) => Some(ECRecoverPublicKey::execute(handle)), + a if a == hash(1027) => Some(Ed25519Verify::execute(handle)), + //Parallel precompiles: + a if a == hash(2050) => Some(Erc20BalancesPrecompile::::execute(handle)), + a if &a.to_fixed_bytes()[0..4] == ASSET_PRECOMPILE_ADDRESS_PREFIX => { + Erc20AssetsPrecompileSet::::new().execute(handle) + } + _ => None, + } + } + + fn is_precompile(&self, address: H160) -> bool { + Self::used_addresses().any(|x| x == address) + || Erc20AssetsPrecompileSet::::new().is_precompile(address) + } +} + +fn hash(a: u64) -> H160 { + H160::from_low_u64_be(a) +} diff --git a/runtime/kerria/src/lib.rs b/runtime/kerria/src/lib.rs index 38884b764..0111e618a 100644 --- a/runtime/kerria/src/lib.rs +++ b/runtime/kerria/src/lib.rs @@ -761,7 +761,7 @@ impl pallet_transaction_payment::Config for Runtime { parameter_types! { pub DefaultElasticity: Permill = Permill::zero(); - pub DefaultBaseFeePerGas: U256 = (1_000_000_000).into(); + pub DefaultBaseFeePerGas: U256 = (10_000_000).into(); } pub struct BaseFeeThreshold; @@ -812,8 +812,8 @@ impl> FindAuthor for FindAuthorTruncated { pub type ParallelPrecompilesType = ParallelPrecompiles; parameter_types! 
{ - /// * Kerria: 1593 - pub EVMChainId: u64 = 1593; + /// * Kerria: 2012 + pub EVMChainId: u64 = 2012; pub CallMagicNumber: u16 = EVMChainId::get() as u16; /// EVM gas limit pub BlockGasLimit: U256 = U256::from( @@ -1991,10 +1991,10 @@ construct_runtime!( AssetRegistry: pallet_asset_registry::{Pallet, Call, Storage, Event} = 95, // EVM - EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 97, - Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 98, - BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 99, - EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 100, + EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 110, + Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 111, + BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 112, + EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 113, // Parachain System, always put it at the end ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned} = 20, diff --git a/runtime/parallel/Cargo.toml b/runtime/parallel/Cargo.toml index fbcb1d2c5..09572b203 100644 --- a/runtime/parallel/Cargo.toml +++ b/runtime/parallel/Cargo.toml @@ -87,6 +87,20 @@ orml-xcm = { version = '0.4.1-dev', default-features = false orml-xcm-support = { version = '0.4.1-dev', default-features = false } orml-xtokens = { version = '0.4.1-dev', default-features = false } +# Frontier dependencies +fp-rpc = { version = '3.0.0-dev', default-features = false } +fp-self-contained = { version = '1.0.0-dev', default-features = false } +pallet-base-fee = { version = '1.0.0', default-features = false } +pallet-ethereum = { version = '4.0.0-dev', default-features = false } +pallet-evm = { version = '6.0.0-dev', default-features = false } +pallet-evm-precompile-blake2 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-bn128 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-dispatch = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-ed25519 = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-modexp = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-sha3fips = { version = '2.0.0-dev', default-features = false } +pallet-evm-precompile-simple = { version = '2.0.0-dev', default-features = false } + # Parallel dependencies pallet-amm = { path = '../../pallets/amm', default-features = false } pallet-asset-registry = { path = '../../pallets/asset-registry', default-features = false } @@ -94,6 +108,8 @@ pallet-bridge = { path = '../../pallets/bridge', default-feature pallet-crowdloans = { path = '../../pallets/crowdloans', default-features = false } pallet-currency-adapter = { path = '../../pallets/currency-adapter', default-features = false } pallet-emergency-shutdown = { path = '../../pallets/emergency-shutdown', default-features = false } +pallet-evm-precompile-assets-erc20 = { path = '../../precompiles/assets-erc20', default-features = false } +pallet-evm-precompile-balances-erc20 = { path = '../../precompiles/balances-erc20', default-features = false } pallet-farming = { path = '../../pallets/farming', default-features = false } pallet-liquid-staking = { path = '../../pallets/liquid-staking', default-features = false } pallet-loans = { path = '../../pallets/loans', default-features = false } @@ -105,6 +121,7 @@ pallet-streaming = { path = 
'../../pallets/streaming', default-feat pallet-traits = { path = '../../pallets/traits', default-features = false } pallet-xcm-helper = { path = '../../pallets/xcm-helper', default-features = false } primitives = { package = 'parallel-primitives', path = '../../primitives', default-features = false } +pallet-evm-signatures = { path = '../../pallets/evm-signatures', default-features = false } [build-dependencies.substrate-wasm-builder] branch = 'polkadot-v0.9.32' @@ -149,6 +166,7 @@ runtime-benchmarks = [ 'pallet-preimage/runtime-benchmarks', 'pallet-scheduler/runtime-benchmarks', 'pallet-treasury/runtime-benchmarks', + 'pallet-ethereum/runtime-benchmarks', ] std = [ 'codec/std', @@ -194,7 +212,6 @@ std = [ 'pallet-router-rpc-runtime-api/std', 'pallet-liquid-staking/std', 'pallet-streaming/std', - 'pallet-asset-registry/std', 'pallet-prices/std', 'pallet-multisig/std', 'pallet-utility/std', @@ -226,7 +243,21 @@ std = [ 'pallet-emergency-shutdown/std', 'pallet-xcm-helper/std', 'pallet-farming/std', + 'pallet-asset-registry/std', 'pallet-traits/std', + 'pallet-base-fee/std', + 'pallet-ethereum/std', + 'pallet-evm/std', + 'pallet-evm-precompile-blake2/std', + 'pallet-evm-precompile-simple/std', + 'pallet-evm-precompile-bn128/std', + 'pallet-evm-precompile-dispatch/std', + 'pallet-evm-precompile-ed25519/std', + 'pallet-evm-precompile-modexp/std', + 'pallet-evm-precompile-sha3fips/std', + 'pallet-evm-precompile-assets-erc20/std', + 'pallet-evm-precompile-balances-erc20/std', + 'pallet-evm-signatures/std', ] try-runtime = [ 'frame-support/try-runtime', @@ -277,4 +308,8 @@ try-runtime = [ 'pallet-crowdloans/try-runtime', 'pallet-xcm-helper/try-runtime', 'pallet-asset-registry/try-runtime', -] + 'pallet-ethereum/try-runtime', + 'pallet-evm/try-runtime', + 'pallet-base-fee/try-runtime', + 'pallet-evm-signatures/try-runtime', +] \ No newline at end of file diff --git a/runtime/parallel/src/lib.rs b/runtime/parallel/src/lib.rs index 78d8d5d6e..dfe7b1269 100644 --- a/runtime/parallel/src/lib.rs +++ b/runtime/parallel/src/lib.rs @@ -26,13 +26,13 @@ use frame_support::{ fungibles::{InspectMetadata, Mutate}, tokens::BalanceConversion, ChangeMembers, ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly, Everything, - InstanceFilter, Nothing, + FindAuthor, InstanceFilter, Nothing, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, ConstantMultiplier, }, - PalletId, WeakBoundedVec, + ConsensusEngineId, PalletId, WeakBoundedVec, }; use frame_system::{ limits::{BlockLength, BlockWeights}, @@ -43,21 +43,25 @@ use orml_traits::{ DataProviderExtended, }; use orml_xcm_support::{IsNativeConcrete, MultiNativeAsset}; +use pallet_evm::{FeeCalculator, Runner}; use pallet_xcm::XcmPassthrough; use polkadot_parachain::primitives::Sibling; use polkadot_runtime_common::SlowAdjustingFeeUpdate; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +use sp_core::{OpaqueMetadata, H160, H256, U256}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ self, AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, - BlockNumberProvider, Convert, Zero, + BlockNumberProvider, Convert, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + UniqueSaturatedInto, Verify, Zero, + }, + transaction_validity::{ + TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, }, - transaction_validity::{TransactionSource, 
-    transaction_validity::{TransactionSource, TransactionValidity},
     ApplyExtrinsicResult, DispatchError, FixedPointNumber, KeyTypeId, Perbill, Permill,
     RuntimeDebug, SaturatedConversion,
 };
@@ -112,6 +116,38 @@ use primitives::{
     Index, Liquidity, Moment, PersistedValidationData, Price, Rate, Ratio, Shortfall, Signature,
 };

+use pallet_evm_precompile_balances_erc20::Erc20Metadata;
+
+mod precompiles;
+use pallet_evm_precompile_assets_erc20::AddressToAssetId;
+pub use precompiles::{ParallelPrecompiles, ASSET_PRECOMPILE_ADDRESS_PREFIX};
+
+pub struct NativeErc20Metadata;
+
+/// ERC20 metadata for the native token.
+impl Erc20Metadata for NativeErc20Metadata {
+    /// Returns the name of the token.
+    fn name() -> &'static str {
+        "PARA token"
+    }
+
+    /// Returns the symbol of the token.
+    fn symbol() -> &'static str {
+        "PARA"
+    }
+
+    /// Returns the decimals places of the token.
+    fn decimals() -> u8 {
+        12
+    }
+
+    /// Must return `true` only if it represents the main native currency of
+    /// the network. It must be the currency used in `pallet_evm`.
+    fn is_native_currency() -> bool {
+        true
+    }
+}
+
 // Make the WASM binary available.
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
@@ -282,20 +318,20 @@ impl Contains for BaseCallFilter {
             RuntimeCall::Assets(pallet_assets::Call::destroy { .. }) |
             RuntimeCall::CurrencyAdapter(_) |
             // 3rd Party
-            RuntimeCall::Vesting(_) |
             RuntimeCall::Oracle(_) |
             RuntimeCall::XTokens(_) |
             RuntimeCall::OrmlXcm(_) |
+            RuntimeCall::Vesting(_) |
             // Loans
             RuntimeCall::Loans(_) |
             RuntimeCall::Prices(_) |
+            // LiquidStaking
+            RuntimeCall::LiquidStaking(_) |
             // AMM
             RuntimeCall::AMM(_) |
             RuntimeCall::AMMRoute(_) |
             // Crowdloans
             RuntimeCall::Crowdloans(_) |
-            // LiquidStaking
-            RuntimeCall::LiquidStaking(_) |
             // Bridge
             RuntimeCall::Bridge(_) |
             // Farming
@@ -303,7 +339,12 @@ impl Contains for BaseCallFilter {
             // Streaming
             RuntimeCall::Streaming(_) |
             // Asset Management
-            RuntimeCall::AssetRegistry(_)
+            RuntimeCall::AssetRegistry(_) |
+            // EVM
+            RuntimeCall::EVM(_) |
+            RuntimeCall::Ethereum(_) |
+            RuntimeCall::BaseFee(_) |
+            RuntimeCall::EVMSignatureCall(_)
         )) && EmergencyShutdown::contains(call)
     }
@@ -413,14 +454,34 @@ impl orml_xtokens::Config for Runtime {
 parameter_types! {
     pub const AssetDeposit: Balance = DOLLARS; // 1 UNIT deposit to create asset
     pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT;
-    pub const AssetAccountDeposit: Balance = deposit(1, 16);
     pub const AssetsStringLimit: u32 = 50;
+    pub const AssetAccountDeposit: Balance = deposit(1, 16);
     /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1)
     // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271
     pub const MetadataDepositBase: Balance = deposit(1, 68);
     pub const MetadataDepositPerByte: Balance = deposit(0, 1);
 }

+impl AddressToAssetId<CurrencyId> for Runtime {
+    fn address_to_asset_id(address: H160) -> Option<CurrencyId> {
+        let mut data = [0u8; 4];
+        let address_bytes: [u8; 20] = address.into();
+        if ASSET_PRECOMPILE_ADDRESS_PREFIX.eq(&address_bytes[0..4]) {
+            data.copy_from_slice(&address_bytes[16..20]);
+            Some(u32::from_be_bytes(data))
+        } else {
+            None
+        }
+    }
+
+    fn asset_id_to_address(asset_id: CurrencyId) -> H160 {
+        let mut data = [0u8; 20];
+        data[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX);
+        data[16..20].copy_from_slice(&asset_id.to_be_bytes());
+        H160::from(data)
+    }
+}
+
 impl pallet_assets::Config for Runtime {
     type RuntimeEvent = RuntimeEvent;
     type Balance = Balance;
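The `AddressToAssetId` impl added above derives an asset's EVM address by prefixing `0xFFFFFFFF` and placing the asset id big-endian in the last four bytes, with bytes 4..16 left at zero. A minimal stand-alone sketch of that round trip, assuming a plain `u32` asset id in place of the runtime's `CurrencyId` alias:

```rust
// Hypothetical stand-alone version of the mapping above; the prefix value
// mirrors ASSET_PRECOMPILE_ADDRESS_PREFIX from precompiles.rs.
const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4];

fn asset_id_to_address(asset_id: u32) -> [u8; 20] {
    let mut data = [0u8; 20];
    data[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX);
    data[16..20].copy_from_slice(&asset_id.to_be_bytes());
    data
}

fn address_to_asset_id(address: [u8; 20]) -> Option<u32> {
    if &address[0..4] == ASSET_PRECOMPILE_ADDRESS_PREFIX {
        let mut id = [0u8; 4];
        id.copy_from_slice(&address[16..20]);
        Some(u32::from_be_bytes(id))
    } else {
        None
    }
}

fn main() {
    // Asset id 100 -> 0xFFFFFFFF000000000000000000000000_00000064.
    let addr = asset_id_to_address(100);
    assert_eq!(&addr[0..4], &[0xffu8; 4][..]);
    assert_eq!(&addr[16..20], &[0, 0, 0, 100][..]);
    // Round trip back to the id; a non-prefixed address is rejected.
    assert_eq!(address_to_asset_id(addr), Some(100));
    assert_eq!(address_to_asset_id([0u8; 20]), None);
}
```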
@@ -714,6 +775,118 @@ impl pallet_transaction_payment::Config for Runtime {
     type RuntimeEvent = RuntimeEvent;
 }

+parameter_types! {
+    pub DefaultElasticity: Permill = Permill::zero();
+    pub DefaultBaseFeePerGas: U256 = (10_000_000).into();
+}
+
+pub struct BaseFeeThreshold;
+impl pallet_base_fee::BaseFeeThreshold for BaseFeeThreshold {
+    fn lower() -> Permill {
+        Permill::zero()
+    }
+    fn ideal() -> Permill {
+        Permill::from_parts(500_000)
+    }
+    fn upper() -> Permill {
+        Permill::from_parts(1_000_000)
+    }
+}
+
+impl pallet_base_fee::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type Threshold = BaseFeeThreshold;
+    type DefaultElasticity = DefaultElasticity;
+    type DefaultBaseFeePerGas = DefaultBaseFeePerGas;
+}
+
+/// Current approximation of the gas/s consumption considering
+/// EVM execution over compiled WASM (on 4.4Ghz CPU).
+/// Given the 500ms Weight, from which 75% only are used for transactions,
+/// the total EVM execution gas limit is: GAS_PER_SECOND * 0.500 * 0.75 ~= 15_000_000.
+pub const GAS_PER_SECOND: u64 = 40_000_000;
+
+/// Approximate ratio of the amount of Weight per Gas.
+/// u64 works for approximations because Weight is a very small unit compared to gas.
+pub const WEIGHT_PER_GAS: u64 = WEIGHT_PER_SECOND.ref_time() / GAS_PER_SECOND;
+
+pub struct FindAuthorTruncated<F>(sp_std::marker::PhantomData<F>);
+impl<F: FindAuthor<u32>> FindAuthor<H160> for FindAuthorTruncated<F> {
+    fn find_author<'a, I>(digests: I) -> Option<H160>
+    where
+        I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
+    {
+        if let Some(author_index) = F::find_author(digests) {
+            let authority_id = Aura::authorities()[author_index as usize].clone();
+            return Some(H160::from_slice(&authority_id.encode()[4..24]));
+        }
+
+        None
+    }
+}
+
+pub type ParallelPrecompilesType = ParallelPrecompiles<Runtime, NativeErc20Metadata>;
+
+parameter_types! {
+    /// * Parallel: 2012
+    pub EVMChainId: u64 = 2012;
+    pub CallMagicNumber: u16 = EVMChainId::get() as u16;
+    /// EVM gas limit
+    pub BlockGasLimit: U256 = U256::from(
+        NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT.ref_time() / WEIGHT_PER_GAS
+    );
+    pub ParallelPrecompilesValue: ParallelPrecompilesType = ParallelPrecompiles::<Runtime, NativeErc20Metadata>::new();
+    pub WeightPerGas: u64 = WEIGHT_PER_GAS;
+}
+
+impl pallet_evm::Config for Runtime {
+    type FeeCalculator = BaseFee;
+    type GasWeightMapping = pallet_evm::FixedGasWeightMapping;
+    type BlockHashMapping = pallet_ethereum::EthereumBlockHashMapping<Self>;
+    type CallOrigin = pallet_evm::EnsureAddressRoot<AccountId>;
+    type WithdrawOrigin = pallet_evm::EnsureAddressTruncated;
+    type AddressMapping = pallet_evm::HashedAddressMapping<BlakeTwo256>;
+    type Currency = Balances;
+    type RuntimeEvent = RuntimeEvent;
+    type Runner = pallet_evm::runner::stack::Runner<Self>;
+    type PrecompilesType = ParallelPrecompilesType;
+    type PrecompilesValue = ParallelPrecompilesValue;
+    type ChainId = EVMChainId;
+    type OnChargeTransaction = pallet_evm::EVMCurrencyAdapter;
+    type BlockGasLimit = BlockGasLimit;
+    type FindAuthor = FindAuthorTruncated<Aura>;
+    type WeightPerGas = WeightPerGas;
+}
+
+impl pallet_ethereum::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type StateRoot = pallet_ethereum::IntermediateStateRoot<Self>;
+}
+
+parameter_types! {
+    pub const EcdsaUnsignedPriority: TransactionPriority = TransactionPriority::MAX / 2;
+    pub const CallFee: Balance = 1 * DOLLARS / 10;
+    pub const VerifySignature: bool = true;
+}
+
+impl pallet_evm_signatures::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type RuntimeCall = RuntimeCall;
+    type Signature = pallet_evm_signatures::ethereum::EthereumSignature;
+    type Signer = <Signature as Verify>::Signer;
+    type CallMagicNumber = CallMagicNumber;
+    type Currency = Balances;
+    type CallFee = CallFee;
+    type OnChargeTransaction = Treasury;
+    type UnsignedPriority = EcdsaUnsignedPriority;
+    type WithdrawOrigin = pallet_evm::EnsureAddressTruncated;
+    type GetNativeCurrencyId = NativeCurrencyId;
+    type VerifySignature = VerifySignature;
+    type Assets = Assets;
+    type AddressMapping = pallet_evm::HashedAddressMapping<BlakeTwo256>;
+    type WeightInfo = pallet_evm_signatures::weights::SubstrateWeight<Runtime>;
+}
+
 #[derive(
     Copy,
     Clone,
@@ -736,6 +909,7 @@ pub enum ProxyType {
     Streaming,
     Governance,
     AMM,
+    EVM,
 }
 impl Default for ProxyType {
     fn default() -> Self {
@@ -825,6 +999,16 @@ impl InstanceFilter for ProxyType {
                 )
                 )
             }
+            // EVM
+            ProxyType::EVM => {
+                matches!(
+                    c,
+                    RuntimeCall::Ethereum(..)
+                        | RuntimeCall::EVM(_)
+                        | RuntimeCall::BaseFee(_)
+                        | RuntimeCall::EVMSignatureCall(_)
+                )
+            }
         }
     }
     fn is_superset(&self, o: &Self) -> bool {
@@ -1808,6 +1992,12 @@ construct_runtime!(
         Streaming: pallet_streaming::{Pallet, Call, Storage, Event} = 94,
         AssetRegistry: pallet_asset_registry::{Pallet, Call, Storage, Event} = 95,

+        // EVM
+        EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 110,
+        Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 111,
+        BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 112,
+        EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 113,
+
         // Parachain System, always put it at the end
         ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned} = 20,
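For reference, the gas constants introduced above fix the EVM capacity of a block. A quick check of the arithmetic in the `GAS_PER_SECOND` comment and the `BlockGasLimit` expression, assuming the usual Substrate figures the comment itself quotes: 10^12 ref-time per second, a 0.5 s maximum block weight, and a 75% normal dispatch ratio:

```rust
fn main() {
    // Constant introduced in runtime/parallel/src/lib.rs above.
    const GAS_PER_SECOND: u64 = 40_000_000;
    // Assumed Substrate-side values (1 s of ref-time, 0.5 s block budget, 75% normal dispatch).
    const WEIGHT_PER_SECOND: u64 = 1_000_000_000_000;
    const MAXIMUM_BLOCK_WEIGHT: u64 = WEIGHT_PER_SECOND / 2;
    const NORMAL_DISPATCH_RATIO_PERCENT: u64 = 75;

    // WEIGHT_PER_GAS = WEIGHT_PER_SECOND / GAS_PER_SECOND
    let weight_per_gas = WEIGHT_PER_SECOND / GAS_PER_SECOND;
    assert_eq!(weight_per_gas, 25_000);

    // BlockGasLimit = NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT / WEIGHT_PER_GAS
    let block_gas_limit =
        MAXIMUM_BLOCK_WEIGHT * NORMAL_DISPATCH_RATIO_PERCENT / 100 / weight_per_gas;
    // Matches the comment: GAS_PER_SECOND * 0.500 * 0.75 ~= 15_000_000.
    assert_eq!(block_gas_limit, 15_000_000);
    println!("WEIGHT_PER_GAS = {weight_per_gas}, BlockGasLimit = {block_gas_limit}");
}
```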
@@ -1836,11 +2026,12 @@ pub type SignedExtra = (
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
-    generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+    fp_self_contained::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
 /// The payload being signed in transactions.
 pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
 /// Extrinsic type that has already been checked.
-pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, SignedExtra>;
+pub type CheckedExtrinsic =
+    fp_self_contained::CheckedExtrinsic<AccountId, RuntimeCall, SignedExtra, H160>;
 /// Executive: handles dispatch to the various modules.
 pub type Executive = frame_executive::Executive<
     Runtime,
@@ -1851,6 +2042,64 @@ pub type Executive = frame_executive::Executive<
     (),
 >;

+impl fp_self_contained::SelfContainedCall for RuntimeCall {
+    type SignedInfo = H160;
+
+    fn is_self_contained(&self) -> bool {
+        match self {
+            RuntimeCall::Ethereum(call) => call.is_self_contained(),
+            _ => false,
+        }
+    }
+
+    fn check_self_contained(&self) -> Option<Result<Self::SignedInfo, TransactionValidityError>> {
+        match self {
+            RuntimeCall::Ethereum(call) => call.check_self_contained(),
+            _ => None,
+        }
+    }
+
+    fn validate_self_contained(
+        &self,
+        info: &Self::SignedInfo,
+        dispatch_info: &DispatchInfoOf<RuntimeCall>,
+        len: usize,
+    ) -> Option<TransactionValidity> {
+        match self {
+            RuntimeCall::Ethereum(call) => call.validate_self_contained(info, dispatch_info, len),
+            _ => None,
+        }
+    }
+
+    fn pre_dispatch_self_contained(
+        &self,
+        info: &Self::SignedInfo,
+        dispatch_info: &DispatchInfoOf<RuntimeCall>,
+        len: usize,
+    ) -> Option<Result<(), TransactionValidityError>> {
+        match self {
+            RuntimeCall::Ethereum(call) => {
+                call.pre_dispatch_self_contained(info, dispatch_info, len)
+            }
+            _ => None,
+        }
+    }
+
+    fn apply_self_contained(
+        self,
+        info: Self::SignedInfo,
+    ) -> Option<sp_runtime::DispatchResultWithInfo<PostDispatchInfoOf<Self>>> {
+        match self {
+            call @ RuntimeCall::Ethereum(pallet_ethereum::Call::transact { .. }) => {
+                Some(call.dispatch(RuntimeOrigin::from(
+                    pallet_ethereum::RawOrigin::EthereumTransaction(info),
+                )))
+            }
+            _ => None,
+        }
+    }
+}
+
 impl_runtime_apis! {
     impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
         fn slot_duration() -> sp_consensus_aura::SlotDuration {
@@ -1998,6 +2247,164 @@ impl_runtime_apis! {
         }
     }

+    impl fp_rpc::EthereumRuntimeRPCApi<Block> for Runtime {
+        fn chain_id() -> u64 {
+            EVMChainId::get()
+        }
+
+        fn account_basic(address: H160) -> pallet_evm::Account {
+            let (account, _) = EVM::account_basic(&address);
+            account
+        }
+
+        fn gas_price() -> U256 {
+            let (gas_price, _) = <Runtime as pallet_evm::Config>::FeeCalculator::min_gas_price();
+            gas_price
+        }
+
+        fn account_code_at(address: H160) -> Vec<u8> {
+            EVM::account_codes(address)
+        }
+
+        fn author() -> H160 {
+            <pallet_evm::Pallet<Runtime>>::find_author()
+        }
+
+        fn storage_at(address: H160, index: U256) -> H256 {
+            let mut tmp = [0u8; 32];
+            index.to_big_endian(&mut tmp);
+            EVM::account_storages(address, H256::from_slice(&tmp[..]))
+        }
+
+        fn call(
+            from: H160,
+            to: H160,
+            data: Vec<u8>,
+            value: U256,
+            gas_limit: U256,
+            max_fee_per_gas: Option<U256>,
+            max_priority_fee_per_gas: Option<U256>,
+            nonce: Option<U256>,
+            estimate: bool,
+            _access_list: Option<Vec<(H160, Vec<H256>)>>,
+        ) -> Result<pallet_evm::CallInfo, sp_runtime::DispatchError> {
+            let config = if estimate {
+                let mut config = <Runtime as pallet_evm::Config>::config().clone();
+                config.estimate = true;
+                Some(config)
+            } else {
+                None
+            };
+
+            let is_transactional = false;
+            let validate = true;
+            <Runtime as pallet_evm::Config>::Runner::call(
+                from,
+                to,
+                data,
+                value,
+                gas_limit.unique_saturated_into(),
+                max_fee_per_gas,
+                max_priority_fee_per_gas,
+                nonce,
+                Vec::new(),
+                is_transactional,
+                validate,
+                config
+                    .as_ref()
+                    .unwrap_or_else(|| <Runtime as pallet_evm::Config>::config()),
+            )
+            .map_err(|err| err.error.into())
+        }
+
+        fn create(
+            from: H160,
+            data: Vec<u8>,
+            value: U256,
+            gas_limit: U256,
+            max_fee_per_gas: Option<U256>,
+            max_priority_fee_per_gas: Option<U256>,
+            nonce: Option<U256>,
+            estimate: bool,
+            _access_list: Option<Vec<(H160, Vec<H256>)>>,
+        ) -> Result<pallet_evm::CreateInfo, sp_runtime::DispatchError> {
+            let config = if estimate {
+                let mut config = <Runtime as pallet_evm::Config>::config().clone();
+                config.estimate = true;
+                Some(config)
+            } else {
+                None
+            };
+
+            let is_transactional = false;
+            let validate = true;
+            #[allow(clippy::or_fun_call)] // suggestion not helpful here
+            <Runtime as pallet_evm::Config>::Runner::create(
+                from,
+                data,
+                value,
+                gas_limit.unique_saturated_into(),
+                max_fee_per_gas,
+                max_priority_fee_per_gas,
+                nonce,
+                Vec::new(),
+                is_transactional,
+                validate,
+                config
+                    .as_ref()
+                    .unwrap_or(<Runtime as pallet_evm::Config>::config()),
+            )
+            .map_err(|err| err.error.into())
+        }
+
+        fn current_transaction_statuses() -> Option<Vec<fp_rpc::TransactionStatus>> {
+            Ethereum::current_transaction_statuses()
+        }
+
+        fn current_block() -> Option<pallet_ethereum::Block> {
+            Ethereum::current_block()
+        }
+
+        fn current_receipts() -> Option<Vec<pallet_ethereum::Receipt>> {
+            Ethereum::current_receipts()
+        }
+
+        fn current_all() -> (
+            Option<pallet_ethereum::Block>,
+            Option<Vec<pallet_ethereum::Receipt>>,
+            Option<Vec<fp_rpc::TransactionStatus>>,
+        ) {
+            (
+                Ethereum::current_block(),
+                Ethereum::current_receipts(),
+                Ethereum::current_transaction_statuses(),
+            )
+        }
+
+        fn extrinsic_filter(
+            xts: Vec<<Block as BlockT>::Extrinsic>,
+        ) -> Vec<pallet_ethereum::Transaction> {
+            xts.into_iter().filter_map(|xt| match xt.0.function {
+                RuntimeCall::Ethereum(pallet_ethereum::Call::transact { transaction }) => Some(transaction),
+                _ => None
+            }).collect::<Vec<pallet_ethereum::Transaction>>()
+        }
+
+        fn elasticity() -> Option<Permill> {
+            Some(BaseFee::elasticity())
+        }
+    }
+
+    impl fp_rpc::ConvertTransactionRuntimeApi<Block> for Runtime {
+        fn convert_transaction(
+            transaction: pallet_ethereum::Transaction
+        ) -> <Block as BlockT>::Extrinsic {
+            UncheckedExtrinsic::new_unsigned(
+                pallet_ethereum::Call::<Runtime>::transact { transaction }.into(),
+            )
+        }
+    }
+
     #[cfg(feature = "runtime-benchmarks")]
     impl frame_benchmarking::Benchmark<Block> for Runtime {
         fn benchmark_metadata(extra: bool) -> (
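Because `BaseFee` is wired in as the `FeeCalculator`, the `gas_price()` exposed through `EthereumRuntimeRPCApi` above is simply the current base fee, which starts at the `DefaultBaseFeePerGas` of 10,000,000 wei configured earlier in this file (and with `DefaultElasticity` set to zero it should stay there). A back-of-the-envelope fee check under that assumption:

```rust
fn main() {
    // DefaultBaseFeePerGas from the runtime above, in wei.
    const BASE_FEE_PER_GAS: u128 = 10_000_000;
    // A plain value transfer costs 21_000 gas in the EVM.
    const TRANSFER_GAS: u128 = 21_000;

    let fee_wei = BASE_FEE_PER_GAS * TRANSFER_GAS;
    assert_eq!(fee_wei, 210_000_000_000);
    // How this wei amount maps onto the 12-decimal PARA balance depends on
    // pallet_evm's currency handling and is not shown here.
    println!("transfer fee at the default base fee = {fee_wei} wei");
}
```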
diff --git a/runtime/parallel/src/precompiles.rs b/runtime/parallel/src/precompiles.rs
new file mode 100644
index 000000000..686deba1e
--- /dev/null
+++ b/runtime/parallel/src/precompiles.rs
@@ -0,0 +1,117 @@
+// Copyright 2021 Parallel Finance Developer.
+// This file is part of Parallel Finance.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::dispatch::GetDispatchInfo;
+use frame_support::dispatch::PostDispatchInfo;
+use pallet_evm::{
+    ExitRevert, Precompile, PrecompileFailure, PrecompileHandle, PrecompileResult, PrecompileSet,
+};
+use sp_core::H160;
+use sp_runtime::traits::Dispatchable;
+use sp_std::fmt::Debug;
+use sp_std::marker::PhantomData;
+
+use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet};
+use pallet_evm_precompile_balances_erc20::Erc20BalancesPrecompile;
+use pallet_evm_precompile_balances_erc20::Erc20Metadata;
+use pallet_evm_precompile_blake2::Blake2F;
+use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing};
+use pallet_evm_precompile_dispatch::Dispatch;
+use pallet_evm_precompile_ed25519::Ed25519Verify;
+use pallet_evm_precompile_modexp::Modexp;
+use pallet_evm_precompile_sha3fips::Sha3FIPS256;
+use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripemd160, Sha256};
+
+/// The asset precompile address prefix. Addresses that match against this prefix will be routed
+/// to Erc20AssetsPrecompileSet
+pub const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4];
+
+#[derive(Debug, Default, Clone, Copy)]
+pub struct ParallelPrecompiles<R, M>(PhantomData<(R, M)>);
+
+impl<R, M> ParallelPrecompiles<R, M>
+where
+    R: pallet_evm::Config,
+    M: Erc20Metadata,
+{
+    pub fn new() -> Self {
+        Self(Default::default())
+    }
+    pub fn used_addresses() -> impl Iterator<Item = H160> {
+        sp_std::vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 1024, 1025, 1026, 1027, 2050]
+            .into_iter()
+            .map(hash)
+    }
+}
+
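`used_addresses()` above and the `execute` match below pin down the address scheme: the standard Ethereum precompiles sit at 0x1-0x9, the extra Frontier-style ones at 1024-1027, the balances ERC-20 at 2050, and asset ERC-20s are matched by the `0xFFFFFFFF` prefix rather than being enumerated. A small sketch of what those `H160` values look like, mirroring the `H160::from_low_u64_be` construction with raw byte arrays so it stands alone:

```rust
// Hypothetical stand-alone illustration of the precompile address layout;
// raw 20-byte arrays stand in for sp_core::H160.
const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4];

fn from_low_u64_be(n: u64) -> [u8; 20] {
    let mut addr = [0u8; 20];
    addr[12..20].copy_from_slice(&n.to_be_bytes());
    addr
}

fn main() {
    // hash(1) is ECRecover, the same address as on Ethereum mainnet:
    // 0x0000000000000000000000000000000000000001.
    let ecrecover = from_low_u64_be(1);
    assert!(ecrecover[..19].iter().all(|b| *b == 0) && ecrecover[19] == 0x01);

    // hash(2050) = 0x...0802 is the native-balance ERC-20 precompile added here.
    let balances_erc20 = from_low_u64_be(2050);
    assert_eq!(&balances_erc20[18..20], &[0x08, 0x02]);

    // Asset ERC-20s are not listed individually; the router checks the first
    // four bytes against the 0xFFFFFFFF prefix instead.
    let mut asset = [0u8; 20];
    asset[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX);
    asset[16..20].copy_from_slice(&7u32.to_be_bytes());
    assert!(&asset[0..4] == ASSET_PRECOMPILE_ADDRESS_PREFIX);
}
```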
+impl<R, M> PrecompileSet for ParallelPrecompiles<R, M>
+where
+    Erc20AssetsPrecompileSet<R>: PrecompileSet,
+    Erc20BalancesPrecompile<R, M>: Precompile,
+    Dispatch<R>: Precompile,
+    R: pallet_evm::Config
+        + AddressToAssetId<<R as pallet_assets::Config>::AssetId>
+        + pallet_assets::Config
+        + pallet_balances::Config,
+    R::RuntimeCall: Dispatchable<PostInfo = PostDispatchInfo> + GetDispatchInfo,
+    ::RuntimeCall: From>,
+    <<R as frame_system::Config>::RuntimeCall as Dispatchable>::RuntimeOrigin:
+        From<Option<<R as frame_system::Config>::AccountId>>,
+    ::Balance: TryFrom,
+    ::Balance: Into,
+    ::Moment: Into,
+    M: Erc20Metadata,
+{
+    fn execute(&self, handle: &mut impl PrecompileHandle) -> Option<PrecompileResult> {
+        let address = handle.code_address();
+        if self.is_precompile(address) && address > hash(9) && handle.context().address != address {
+            return Some(Err(PrecompileFailure::Revert {
+                exit_status: ExitRevert::Reverted,
+                output: b"cannot be called with DELEGATECALL or CALLCODE".to_vec(),
+            }));
+        }
+        match address {
+            // Ethereum precompiles :
+            a if a == hash(1) => Some(ECRecover::execute(handle)),
+            a if a == hash(2) => Some(Sha256::execute(handle)),
+            a if a == hash(3) => Some(Ripemd160::execute(handle)),
+            a if a == hash(4) => Some(Identity::execute(handle)),
+            a if a == hash(5) => Some(Modexp::execute(handle)),
+            a if a == hash(6) => Some(Bn128Add::execute(handle)),
+            a if a == hash(7) => Some(Bn128Mul::execute(handle)),
+            a if a == hash(8) => Some(Bn128Pairing::execute(handle)),
+            a if a == hash(9) => Some(Blake2F::execute(handle)),
+            // Non-Frontier specific nor Ethereum precompiles :
+            a if a == hash(1024) => Some(Sha3FIPS256::execute(handle)),
+            a if a == hash(1025) => Some(ECRecoverPublicKey::execute(handle)),
+            a if a == hash(1026) => Some(ECRecoverPublicKey::execute(handle)),
+            a if a == hash(1027) => Some(Ed25519Verify::execute(handle)),
+            //Parallel precompiles:
+            a if a == hash(2050) => Some(Erc20BalancesPrecompile::<R, M>::execute(handle)),
+            a if &a.to_fixed_bytes()[0..4] == ASSET_PRECOMPILE_ADDRESS_PREFIX => {
+                Erc20AssetsPrecompileSet::<R>::new().execute(handle)
+            }
+            _ => None,
+        }
+    }
+
+    fn is_precompile(&self, address: H160) -> bool {
+        Self::used_addresses().any(|x| x == address)
+            || Erc20AssetsPrecompileSet::<R>::new().is_precompile(address)
+    }
+}
+
+fn hash(a: u64) -> H160 {
+    H160::from_low_u64_be(a)
+}
diff --git a/runtime/vanilla/Cargo.toml b/runtime/vanilla/Cargo.toml
index 2f17ca856..c92191197 100644
--- a/runtime/vanilla/Cargo.toml
+++ b/runtime/vanilla/Cargo.toml
@@ -247,7 +247,6 @@ std = [
     'pallet-router/std',
     'pallet-currency-adapter/std',
     'pallet-farming/std',
-    'pallet-crowdloans/std',
     'pallet-emergency-shutdown/std',
     'pallet-xcm-helper/std',
     'pallet-stableswap/std',
diff --git a/runtime/vanilla/src/lib.rs b/runtime/vanilla/src/lib.rs
index dd861ac30..ddb1a3452 100644
--- a/runtime/vanilla/src/lib.rs
+++ b/runtime/vanilla/src/lib.rs
@@ -761,7 +761,7 @@ impl pallet_transaction_payment::Config for Runtime {
 parameter_types! {
     pub DefaultElasticity: Permill = Permill::zero();
-    pub DefaultBaseFeePerGas: U256 = (1_000_000_000).into();
+    pub DefaultBaseFeePerGas: U256 = (10_000_000).into();
 }

 pub struct BaseFeeThreshold;
@@ -813,8 +813,8 @@ pub type ParallelPrecompilesType = ParallelPrecompiles} = 96,

         // EVM
-        EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 97,
-        Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 98,
-        BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 99,
-        EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 100,
+        EVM: pallet_evm::{Pallet, Config, Call, Storage, Event} = 110,
+        Ethereum: pallet_ethereum::{Pallet, Call, Storage, Event, Origin, Config} = 111,
+        BaseFee: pallet_base_fee::{Pallet, Call, Storage, Config, Event} = 112,
+        EVMSignatureCall: pallet_evm_signatures::{Pallet, Call, Event, ValidateUnsigned} = 113,

         // Parachain System, always put it at the end
         ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned} = 20,