Squashed 'interfaces/' content from commit 156e93e

git-subtree-dir: interfaces
git-subtree-split: 156e93ea05f776e36f89e0827d8e4ef77905bb79
alex.sharov 2021-07-09 20:21:13 +07:00
commit f048b64318
30 changed files with 1671 additions and 0 deletions

51
.github/workflows/rust.yml vendored Normal file
@@ -0,0 +1,51 @@
on: [push, pull_request]
name: Rust
jobs:
ci:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
components: rustfmt, clippy
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check --config merge_imports=true
- uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- uses: actions-rs/cargo@v1
with:
command: hack
args: check --all --ignore-private --each-feature --no-dev-deps
- uses: actions-rs/cargo@v1
with:
command: check
args: --all --all-targets --all-features
- uses: actions-rs/cargo@v1
with:
command: test
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings

2
.gitignore vendored Normal file
@@ -0,0 +1,2 @@
/target
Cargo.lock

27
Cargo.toml Normal file
@@ -0,0 +1,27 @@
[package]
name = "ethereum-interfaces"
version = "0.1.0"
authors = ["Artem Vorotnikov <artem@vorotnikov.me>"]
edition = "2018"
license = "Apache-2.0"
[features]
consensus = []
sentry = []
remotekv = []
snapshotsync = []
txpool = []
db = ["once_cell", "serde", "toml"]
[dependencies]
arrayref = "0.3"
ethereum-types = { version = "0.11", default-features = false }
once_cell = { version = "1", optional = true }
prost = "0.7"
serde = { version = "1", features = ["derive"], optional = true }
toml = { version = "0.5", optional = true }
tonic = "0.4"
[build-dependencies]
prost-build = "0.7"
tonic-build = "0.4"

201
LICENSE Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

34
README.md Normal file
@@ -0,0 +1,34 @@
# Interfaces
Interfaces for turbo-geth components. Currently it is a collection of `.proto` files describing the gRPC interfaces between components; documentation about each interface, its components, and the required gRPC version will be added later.
<img src="turbo-geth-architecture.png">
See more info on the component and descriptions in [Components](./_docs/README.md)
# What's in this repo
- Protobuf definitions
- Wrappers:
- Rust crate with autogenerated client and server based on [Tonic](https://github.com/hyperium/tonic)
NOTE: You are free to ignore the provided wrappers and use the `.proto` files directly
# Suggested integration into other repositories
```
git subtree add --prefix interfaces --squash https://github.com/ledgerwatch/interfaces master
```
When you need to update the subtree to a specific commit or tag, you can use these commands:
```
git rm -rf interfaces
git commit -m "Remove interfaces for replacement"
git subtree add --prefix interfaces --squash https://github.com/ledgerwatch/interfaces <tag_or_commit>
```
Unfortunately, `git subtree pull` does not work because we use Squash-Merge for pull requests in this repository
and also automatically delete merged branches.
# Style guide
[https://developers.google.com/protocol-buffers/docs/style](https://developers.google.com/protocol-buffers/docs/style)

138
_docs/README.md Normal file
@@ -0,0 +1,138 @@
# Erigon Architecture
The architectural diagram
![](../turbo-geth-architecture.png)
# Loosely Coupled Architecture
The node consists of loosely coupled components with well-defined "edges" -- protocols that are used between these components.
It is reminiscent of a [microservices architecture](https://en.wikipedia.org/wiki/Microservices), where each component has clearly defined responsibilities and an interface. Implementations may vary. In the case of Erigon, we use gRPC/protobuf definitions, which allows the components to be written in different languages.
In our experience, every p2p blockchain node has more or less these components, even when they aren't explicitly set up. In that case, the result is a highly coupled system of the same components, but one that is more resistant to change.
## Advantages of loosely coupled architecture
* Fewer dependencies between components -- fewer side effects of changing one component on another.
* Team scalability -- with well-specified components, it is easy to form sub-teams that work on each component with less coordination overhead. Most cross-team communication is around the interface definition and interpretation.
* Learning curve reduction -- it is not easy to find a full-fledged blockchain node developer, but narrowing down the area of responsibility makes it easier both to find candidates and to coach/mentor the right skillset.
* Independent innovation and improvement of each layer -- teams specialized in a sub-component find it easier to discover improvements, optimizations, and innovative approaches than a team that has to keep everything about the node in their heads.
## Designing for upgradeabilty
One important part of the design of a node is to make sure that we leave ourselves room to upgrade it in a simple way.
That means a couple of things:
- protocols for each component should be versioned, to make sure that we can't run inconsistent versions together. [semver](https://semver.org) is the better approach here because it allows parsing even future versions and figuring out how compatible they are, based on a simple convention;
- compatibility should be kept as much as possible; unless there is a very good reason to break it, we will try to keep it. In practice that means:
    - adding new APIs is safe;
    - adding new parameters is safe, taking into account that we can always support them being absent and revert to the old behaviour;
    - renaming parameters and methods is considered harmful;
    - removing parameters and methods is considered harmful;
    - radically changing the behaviour of a method without any changes to the protocol is considered harmful.
Tools for automated compatibility checks are available for Protobuf: https://github.com/bufbuild/buf
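To make the convention concrete, here is a hedged sketch of a client-side compatibility check in Rust, mirroring the `VersionReply` message from [types.proto](../types/types.proto); the helper itself is hypothetical, not part of the crate:
```rust
// Hypothetical helper: decide whether a server's reported version is
// compatible with the version this client was built for.
struct Version {
    major: u32, // incompatible changes
    minor: u32, // backward-compatible additions
    patch: u32, // backward-compatible bug fixes
}

fn is_compatible(built_for: &Version, server: &Version) -> bool {
    // Same MAJOR, and the server offers at least the MINOR we rely on;
    // PATCH never affects compatibility.
    built_for.major == server.major && server.minor >= built_for.minor
}

fn main() {
    let built_for = Version { major: 2, minor: 1, patch: 0 };
    let server = Version { major: 2, minor: 3, patch: 5 };
    assert!(is_compatible(&built_for, &server));
    println!("compatible with {}.{}.{}", server.major, server.minor, server.patch);
}
```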
## Implementation variants
### Microservices
Erigon uses the gRPC-powered variant: each component implements a gRPC interface defined in the protobuf files, so there is no language dependency across components.
**Advantages**
- it is possible to run a single node spread on multiple machines (and specialize each machine to its job, like GPU/CPU for hash/proof calculations, memory-heavy TX pool, etc)
- it is possible to plug & play multiple variants of each component
- it is possible to write each component in its own language and use the power of each language to the most (perf-critical in Rust or C++, Go for networking, some parts in Python and JS for fast prototyping, etc)
- it is possible to replace a component as a better version is written in another language
**Challenges**
- deployment process for average users could be clumsy
- managing multiple sub-projects
- testing across interfaces; extensive integration testing is needed
### Single binary
That is when every module is written in the same language and compiled into the same binary, either as a static library, a dynamic library, or just a subfolder in the code.
**Advantages**
- simpler deployment process
- simpler component compatibility
**Challenges**
- have to settle on a single language/framework for the whole project
- less flexibility with upgrades
# Components
## 1. API Service (RPCDaemon, SilkRPC, etc)
Each node exposes an API to plug it into other components. For Ethereum nodes, examples are JSON-RPC or GraphQL APIs. It is the interface between DApps and the nodes.
The API Service's responsibility is to expose these APIs.
The API design is not limited to JSON-RPC/http with `eth_call`s; it could be something else entirely: gRPC, GraphQL, or even some REST to power web UIs.
The API Service connects to the [Core].
In Erigon, there are two interfaces:
- [ETH Backend, proto](../remote/ethbackend.proto) -- blockchain events and core technical information (versions, etc)
- [KV, proto](../remote/kv.proto) -- database access
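As an illustration, a minimal sketch of an API service checking in with the Core over the KV interface, assuming the Rust crate's `remotekv` feature and tonic's usual codegen naming (module path and endpoint are assumptions, not a fixed API):
```rust
use ethereum_interfaces::remotekv::kv_client::KvClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Endpoint is illustrative; Core would expose its gRPC server here.
    let mut kv = KvClient::connect("http://127.0.0.1:9090").await?;
    // Version is the natural first call: it lets the API service verify
    // protocol compatibility before opening any Tx streams.
    let v = kv.version(()).await?.into_inner();
    println!("KV service version {}.{}.{}", v.major, v.minor, v.patch);
    Ok(())
}
```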
## 2. Sentry
Sentry is the component connecting the node to the p2p network of the blockchain. In the case of Erigon and Ethereum, it implements [`eth/65`, `eth/66`, etc](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#change-log) protocols via [devp2p](https://github.com/ethereum/devp2p).
Sentry accepts connections from [Core] and [Transaction Pool] components.
Erigon has the following interface for sentry:
- [P2Psentry, proto](../p2psentry/sentry.proto) -- sending/receiving messages and the peer penalization mechanism.
Both the [transaction pool] and the [core] use the same interface.
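A hedged sketch of how a consumer (Core or the transaction pool) might subscribe to inbound messages, assuming the Rust crate's `sentry` feature and tonic's usual codegen naming:
```rust
use ethereum_interfaces::sentry::{sentry_client::SentryClient, MessageId, MessagesRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut sentry = SentryClient::connect("http://127.0.0.1:9091").await?;
    // Subscribe to a subset of eth/66 message ids; calling Messages again
    // with a different set would start a separate stream.
    let request = MessagesRequest {
        ids: vec![
            MessageId::BlockHeaders66 as i32,
            MessageId::BlockBodies66 as i32,
        ],
    };
    let mut inbound = sentry.messages(request).await?.into_inner();
    while let Some(msg) = inbound.message().await? {
        println!("message id {} ({} bytes) from a peer", msg.id, msg.data.len());
    }
    Ok(())
}
```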
## 3. Transaction Pool
The transaction pool contains valid transactions that are gossiped around the network but aren't mined yet. The transaction pool validates transactions that it gets from [Sentry] and, in case a transaction is valid, adds it to its own in-memory storage. Please note that at the time of writing, the Transaction Pool component
has not been split out yet, but this should happen relatively soon.
Miners use this component to get candidate transactions for the block.
Separating the tx pool into its own component makes forks like [mev-geth](https://github.com/flashbots/mev-geth) unnecessary, because such a fork could be just a separate tx pool implementation.
The Transaction Pool connects to both Sentry and Core. Sentry provides new transactions to the tx pool, and Core sends events to remove txs when a block containing them is discovered, whether from peers or through mining. Core can also re-add txs to the transaction pool in case of chain splits.
Erigon has the following interfaces for the transaction pool:
- [txpool, proto](../txpool/txpool.proto)
- [txpool_control, proto](../txpool/txpool_control.proto)
- [mining, proto](../txpool/mining.proto)
See more about the architecture: https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design
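For illustration, a sketch of submitting a locally signed transaction via the `txpool` interface from the Rust crate (endpoint and RLP bytes are placeholders):
```rust
use ethereum_interfaces::txpool::{txpool_client::TxpoolClient, AddRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut pool = TxpoolClient::connect("http://127.0.0.1:9092").await?;
    // rlp_txs carries signed transactions; the reply preserves order and
    // amount, one ImportResult per submitted transaction.
    let signed_tx_rlp: Vec<u8> = vec![/* RLP-encoded signed tx bytes */];
    let reply = pool
        .add(AddRequest { rlp_txs: vec![signed_tx_rlp.into()] })
        .await?
        .into_inner();
    for (i, result) in reply.imported.iter().enumerate() {
        println!("tx {}: import result code {}", i, result);
    }
    Ok(())
}
```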
## 4. Core
Core is the passive part of the replicating state machine that is a blockchain. Core maintains its state and reacts to the protocol messages from the
outside, with the goal of synchronizing its state with other nodes in the network. This synchronization is achieved by applying or reverting state
transitions.
Currently, Core is the largest and the most complex component, and it has its own internal structure. State transitions are split into stages,
and that gives rise to "[Staged Sync](./staged-sync.md)". In staged sync, we consider two kinds of state transitions: forward state transitions, and reverts of previous state transitions
(also called "Unwind"). Forward state transitions are split into invocations of functions in a certain order. At the time of writing, there are
18 such functions, representing "stages". Reverts of previous state transitions are performed by invoking another array of functions, also
in a specific order. See [Staged Sync Architecture](./staged-sync.md) for more information on Staged Sync.
Core connects to [Sentry] and [Consensus Engine], and accepts connections from [Transaction Pool] and [API Service].
## 5. Consensus Engine
Consensus Engine is the component that abstracts away consensus mechanisms like Ethash Proof of Work, ProgPoW Proof of Work, Clique Proof of Authority,
and in the future also AuRa Proof of Authority and Proof of Stake mechanisms. Note that at the time of writing, the Consensus Engine split has not been
done yet, but some [work has been done on the interface](https://github.com/ledgerwatch/erigon/wiki/Consensus-Engine-separation).
Erigon has the following interface for the consensus engine:
- [consensus_engine, proto](../consensus_engine/consensus.proto)
## 6. Downloader
The Downloader component abstracts away the functionality of delivering some parts of the database using "out-of-band" protocols like BitTorrent,
IPFS, Swarm, and others.

1
_docs/staged-sync.drawio Normal file

File diff suppressed because one or more lines are too long

103
_docs/staged-sync.md Normal file
@@ -0,0 +1,103 @@
Staged Sync (Erigon)
---
Staged Sync is a version of [Go-Ethereum](https://github.com/ethereum/go-ethereum)'s [[Full Sync]] that was rearchitected for better performance.
## How The Sync Works
Staged sync consists of independent stages that are launched in a specific order, one after another. This architecture allows for batch processing of data.
After the last stage is finished, the process starts from the beginning by looking for new headers to download.
If the app is restarted in between stages, it restarts from the first stage.
![](./stages-overview.png)
### Stage
A Stage consists of:
* stage ID;
* progress function;
* unwind function;
* prune function;
Only the ID and the progress function are required.
Both the progress and unwind functions can have side effects. In practice, usually only the progress function does (downloader interaction).
Each function (progress, unwind, prune) has **input** DB buckets and **output** DB buckets. That allows building a dependency graph and running the stages in order.
![](./stages-ordering.png)
That is important because unwinds do not always follow the reverse order of progress. A good example of that is the tx pool update, which is always the final stage.
Each stage saves its own progress. In Ethereum, the progress of at least a couple of stages is "special", so we account for that. For example, the progress of the _execution stage_ is the basis for many index-building stages.
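An illustrative sketch of this structure (the actual implementation lives in Erigon and is written in Go; the Rust below is purely expository):
```rust
use std::collections::HashMap;

type BlockNumber = u64;

// A stage bundles an id with its functions; only id and progress are
// required. Unwind and prune would be invoked on reorgs and pruning
// (not shown here).
struct Stage {
    id: &'static str,
    progress: fn(from: BlockNumber) -> BlockNumber,
    unwind: Option<fn(to: BlockNumber)>,
    prune: Option<fn(before: BlockNumber)>,
}

// One sync cycle: run the stages in their fixed order, each picking up
// from and persisting its own saved progress.
fn run_cycle(stages: &[Stage], saved: &mut HashMap<&'static str, BlockNumber>) {
    for stage in stages {
        let from = saved.get(stage.id).copied().unwrap_or(0);
        let to = (stage.progress)(from);
        saved.insert(stage.id, to);
    }
}
```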
### Batch Processing
![](./stages-batch-process.png)
Each stage can work on a range of blocks. That is a huge performance improvement over sequential processing.
In Erigon genesis sync:
- first stage downloads all headers
- then we download all bodies
- then we execute all blocks
- then we add a Merkle commitment (either from scratch or incremental)
- then we build all indices
- then we update the tx pool
That allows grouping similar operations together and optimizing each stage for throughput. Also, some stages, like the commitment stage, require far less hash computation during genesis sync.
It also enables optimised DB inserts; see the next part.
### ETL and optimal DB inserts
![](./stages-etl.png)
B-tree-based DBs (LMDB, MDBX, etc.) usually store data in pages. During random inserts, those pages get fragmented (remember Windows 98?), and data often needs to be moved between them to free up space in a certain page.
All of that is called **write amplification**. The more random the data you insert into a DB, the more expensive it is to insert it.
Luckily, if we insert keys in sorted order, this effect disappears: we fill pages one by one.
That is where our ETL framework comes to the rescue. When batch processing data, instead of writing it directly to the database, we first extract it to a temp folder (which can be in RAM if it fits). During extraction, we generate the keys for insertion. Then we load the data from these files in sorted order, using a heap. That way, the keys are always inserted in sorted order.
This approach also allows us to avoid overwrites in certain scenarios, because we can specify the right strategy when loading the data: keep only the latest value, convert the values into a list, or anything else.
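A minimal sketch of the idea, with a single in-memory sort standing in for the real framework's sorted temp files and heap-based merge:
```rust
use std::collections::BTreeMap;

// Extract: collect (key, value) pairs in whatever order processing yields
// them (the real framework spills sorted runs to a temp folder instead).
// Load: insert in ascending key order, so B-tree pages fill sequentially
// and write amplification stays low.
fn etl_load(extracted: Vec<(Vec<u8>, Vec<u8>)>, db: &mut BTreeMap<Vec<u8>, Vec<u8>>) {
    let mut runs = extracted;
    runs.sort_by(|a, b| a.0.cmp(&b.0));
    for (key, value) in runs {
        // Load strategy here is "keep the latest value for duplicate keys";
        // the stable sort preserves extraction order among equal keys.
        db.insert(key, value);
    }
}
```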
### RPC calls and indices
![](./stages-rpc-methods.png)
Some stages build indices that serve the RPC API calls later on. That is why we can often introduce a new sync stage together with the API call that uses it. The API module can always request the state of any stage it needs, to plan its execution accordingly.
### Commitment As A Stage
![](./stages-commitment.png)
One more benefit of this approach is that the Merkle commitment (hex tries) in Erigon is its own stage, with its own couple of buckets. Other stages are independent enough to either not change at all when/if the commitment mechanism changes, or to change only minimally.
### What About Parallel Execution?
Some parallel execution could be possible in cases where stages aren't dependent on each other in any way.
However, in practice, most stages are bound by their I/O performance, so making those parallel won't bring any performance benefits.
There could be benefits in having parallelism **inside** stages. In Erigon, the **senders recovery** stage is very CPU-intensive and can benefit from multicore execution, so it launches as many worker threads as there are CPU cores.
### Adding/Removing stages
Adding a stage is usually a simple task. On first launch, the new stage will run as if from genesis, even though the node might already be in a synced state.
Removing or altering a sync stage can be trickier, because the dependent stages then have to be considered.
### Offline Capabilities
Not every stage needs the network to work. Therefore, it is possible to run some stages, especially during genesis sync, whether or not the node has a connection. An example of that is index building.
### Risks & Tradeoffs
* Malicious unwinds during genesis sync. Since we check commitments once per batch, this could be used to slow down genesis sync significantly: we sync/execute everything but get a wrong root hash in the end. After genesis sync is done, this is not an issue; even though we do batch processing, at the tip this architecture in practice becomes a block-by-block processor and is not worse than anything else.
* Batch processing doesn't allow most API calls on partial data during genesis sync. Basically, regular geth at 1/2 of the sync will respond to RPC requests, but Erigon requires the commitment stage to complete before allowing these requests.
These tradeoffs are specific to genesis sync, so in Erigon we focus on reducing the need for genesis sync (e.g., off-chain snapshot distribution) to minimize these issues.

BIN
_docs/stages-batch-process.png Normal file (binary, 82 KiB)

BIN
_docs/stages-commitment.png Normal file (binary, 60 KiB)

BIN
_docs/stages-etl.png Normal file (binary, 49 KiB)

BIN
_docs/stages-ordering.png Normal file (binary, 12 KiB)

BIN
_docs/stages-overview.png Normal file (binary, 16 KiB)

BIN
_docs/stages-rpc-methods.png Normal file (binary, 19 KiB)

45
build.rs Normal file
@@ -0,0 +1,45 @@
fn config() -> prost_build::Config {
let mut config = prost_build::Config::new();
config.protoc_arg("--experimental_allow_proto3_optional");
config.bytes(&["."]);
config
}
fn make_protos(protos: &[&str]) {
tonic_build::configure()
.format(false)
.compile_with_config(config(), protos, &["."])
.unwrap();
}
fn main() {
let mut protos = vec!["types/types.proto"];
if cfg!(feature = "consensus") {
protos.push("consensus_engine/consensus.proto");
}
if cfg!(feature = "sentry") {
protos.push("p2psentry/sentry.proto");
}
if cfg!(feature = "remotekv") {
protos.push("remote/ethbackend.proto");
protos.push("remote/kv.proto");
}
if cfg!(feature = "snapshotsync") {
protos.push("snapshot_downloader/external_downloader.proto");
}
if cfg!(feature = "txpool") {
protos.push("txpool/txpool.proto");
protos.push("txpool/txpool_control.proto");
}
make_protos(&protos);
}

163
consensus_engine/consensus.proto Normal file
@@ -0,0 +1,163 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package consensus;
option go_package = "./consensus;consensus";
message ChainSpecMessage {
string mechanism = 1; // Name of consensus mechanism, e.g. ethash, clique, aura
bytes mechanism_config = 2; // Configuration of specific consensus mechanism - format is specific to the mechanism
Genesis genesis = 3; // Description of genesis block
repeated Fork forks = 4; // Description of forks (upgrades)
}
message Genesis {
types.H256 chain_id = 1; // Chain id starting from genesis block and until the first fork
Template template = 2; // Genesis header without values like "uncle hash", "tx hash" and "state root" calculated
}
message Fork {
string name = 1; // Code name of the fork
uint64 number = 2; // First block number at which rules of the fork activate
types.H256 chain_id = 3; // Chain id starting from this fork until the next fork
}
message Error {
uint32 code = 1;
string description = 2;
}
message Result {
bool ok = 1;
optional Error error = 2;
}
message Template {
types.H256 parent_hash = 1;
types.H160 coinbase = 2;
types.H256 difficulty = 3;
uint64 number = 4;
uint64 gas_limit = 5;
uint64 time = 6;
bytes extra = 7;
uint64 nonce = 8;
}
message BlockHeader {
Template template = 1;
types.H256 uncle_hash = 2;
types.H256 root_hash = 3;
types.H256 tx_hash = 4;
types.H256 receipt_hash = 5;
bytes bloom = 6;
uint64 gas_used = 7;
types.H256 mix_digest = 8;
}
message Transaction {
}
message Block {
BlockHeader header = 1;
repeated BlockHeader uncles = 2;
repeated Transaction transactions = 3;
bytes total_difficulty = 4;
}
message GetAuthorRequest {
BlockHeader header = 1;
}
message GetAuthorResponse {
Result result = 1;
types.H160 address = 2;
}
message VerifyHeaderRequest {
BlockHeader header = 1;
bool seal = 2;
}
message VerifyHeaderResponse {
types.H256 hash = 1;
Result result = 2;
bytes finaliseCode = 3; // Code (in TEVM) to execute at the end of the block to finalise it according to the consensus engine rules
}
message HeadersRequest {
types.H256 hash = 1; // Hash of the highest header requested
uint32 amount = 2; // Number of headers requested
}
message HeadersResponse {
BlockHeader header = 1;
}
message VerifyUnclesRequest {
Block block = 1;
}
message VerifyUnclesResponse {
Result result = 1;
}
message SealBlockRequest {
Result result = 1;
Block block = 2;
}
message SealBlockResponse {
Result result = 1;
Block block = 2;
}
message PrepareRequest {
BlockHeader header = 1;
}
message PrepareResponse {
Result result = 1;
}
message FinalizeRequest {
BlockHeader header = 1;
repeated BlockHeader uncles = 2;
}
message FinalizeResponse {
Result result = 1;
types.H256 miner_reward = 2;
repeated types.H256 uncle_rewards = 3;
}
service ConsensusEngine {
rpc GetAuthor(GetAuthorRequest) returns(GetAuthorResponse);
rpc ChainSpec(google.protobuf.Empty) returns(ChainSpecMessage);
// Core requests verifications from the Consensus Engine via this function
rpc VerifyHeaders(stream VerifyHeaderRequest) returns(stream VerifyHeaderResponse);
// The Consensus Engine may ask for extra information (more headers) from the Core; these requests come through the stream
// returned by the ProvideHeaders function
rpc ProvideHeaders(stream HeadersResponse) returns(stream HeadersRequest);
rpc VerifyUncles(stream VerifyUnclesRequest) returns(stream VerifyUnclesResponse);
rpc Prepare(stream PrepareRequest) returns(stream PrepareResponse);
rpc Finalize(stream FinalizeRequest) returns(stream FinalizeResponse);
rpc Seal(SealBlockRequest) returns(stream SealBlockResponse);
}
message StartTestCaseMessage {
string mechanism = 1; // Consensus mechanism used in the test case
bytes config = 2; // Configuration specific to the consensus engine tested
}
// Test is only run by the consensus engine in testing mode; it allows the test driver to inject the Configuration
// (which includes the chain spec) into the Consensus Engine and reset its state
service Test {
rpc StartTestCase(StartTestCaseMessage) returns(google.protobuf.Empty);
}

49
db_tables.toml Normal file
@@ -0,0 +1,49 @@
PlainState = { dup_sort = { auto = { from = 60, to = 28 } } }
PlainCodeHash = {}
AccountChangeSet = { dup_sort = {} }
StorageChangeSet = { dup_sort = {} }
HashedAccount = {}
HashedStorage = { dup_sort = { auto = { from = 72, to = 40 } } }
AccountHistory = {}
StorageHistory = {}
Code = {}
HashedCodeHash = {}
IncarnationMap = {}
TEVMCodeStatus = { dup_sort = {} }
TEVMCode = {}
TrieAccount = {}
TrieStorage = {}
DbInfo = {}
SnapshotInfo = {}
BittorrentInfo = {}
HeadersSnapshotInfo = {}
BodiesSnapshotInfo = {}
StateSnapshotInfo = {}
HeaderNumber = {}
CanonicalHeader = {}
Header = {}
HeadersTotalDifficulty = {}
BlockBody = {}
BlockTransaction = {}
Receipt = {}
TransactionLog = {}
LogTopicIndex = {}
LogAddressIndex = {}
CallTraceSet = { dup_sort = {} }
CallFromIndex = {}
CallToIndex = {}
BlockTransactionLookup = {}
BloomBits = {}
Preimage = {}
Config = {}
BloomBitsIndex = {}
SyncStage = {}
SyncStageUnwind = {}
CliqueSeparate = {}
CliqueSnapshot = {}
CliqueLastSnapshot = {}
TxSender = {}
LastBlock = {}
Migration = {}
Sequence = {}
LastHeader = {}

160
p2psentry/sentry.proto Normal file
@@ -0,0 +1,160 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package sentry;
option go_package = "./sentry;sentry";
enum MessageId {
// ======= eth 65 protocol ===========
STATUS_65 = 0;
GET_BLOCK_HEADERS_65 = 1;
BLOCK_HEADERS_65 = 2;
BLOCK_HASHES_65 = 3;
GET_BLOCK_BODIES_65 = 4;
BLOCK_BODIES_65 = 5;
GET_NODE_DATA_65 = 6;
NODE_DATA_65 = 7;
GET_RECEIPTS_65 = 8;
RECEIPTS_65 = 9;
NEW_BLOCK_HASHES_65 = 10;
NEW_BLOCK_65 = 11;
TRANSACTIONS_65 = 12;
NEW_POOLED_TRANSACTION_HASHES_65 = 13;
GET_POOLED_TRANSACTIONS_65 = 14;
POOLED_TRANSACTIONS_65 = 15;
// ======= eth 66 protocol ===========
// eth64 announcement messages (no id)
STATUS_66 = 17;
NEW_BLOCK_HASHES_66 = 18;
NEW_BLOCK_66 = 19;
TRANSACTIONS_66 = 20;
// eth65 announcement messages (no id)
NEW_POOLED_TRANSACTION_HASHES_66 = 21;
// eth66 messages with request-id
GET_BLOCK_HEADERS_66 = 22;
GET_BLOCK_BODIES_66 = 23;
GET_NODE_DATA_66 = 24;
GET_RECEIPTS_66 = 25;
GET_POOLED_TRANSACTIONS_66 = 26;
BLOCK_HEADERS_66 = 27;
BLOCK_BODIES_66 = 28;
NODE_DATA_66 = 29;
RECEIPTS_66 = 30;
POOLED_TRANSACTIONS_66 = 31;
// ======= eth 67 protocol ===========
// ...
}
message OutboundMessageData {
MessageId id = 1;
bytes data = 2;
}
message SendMessageByMinBlockRequest {
OutboundMessageData data = 1;
uint64 min_block = 2;
}
message SendMessageByIdRequest {
OutboundMessageData data = 1;
types.H512 peer_id = 2;
}
message SendMessageToRandomPeersRequest {
OutboundMessageData data = 1;
uint64 max_peers = 2;
}
message SentPeers {repeated types.H512 peers = 1;}
enum PenaltyKind {Kick = 0;}
message PenalizePeerRequest {
types.H512 peer_id = 1;
PenaltyKind penalty = 2;
}
message PeerMinBlockRequest {
types.H512 peer_id = 1;
uint64 min_block = 2;
}
message InboundMessage {
MessageId id = 1;
bytes data = 2;
types.H512 peer_id = 3;
}
message Forks {
types.H256 genesis = 1;
repeated uint64 forks = 2;
}
message StatusData {
uint64 network_id = 1;
types.H256 total_difficulty = 2;
types.H256 best_hash = 3;
Forks fork_data = 4;
uint64 max_block = 5;
}
enum Protocol {
ETH65 = 0;
ETH66 = 1;
}
message SetStatusReply {
Protocol protocol = 1;
}
message MessagesRequest {
repeated MessageId ids = 1;
}
message PeerCountRequest {}
message PeerCountReply {uint64 count = 1;}
message PeersRequest {}
message PeersReply {
enum PeerEvent {
Connect = 0;
Disconnect = 1;
}
types.H512 peer_id = 1;
PeerEvent event = 2;
}
service Sentry {
rpc PenalizePeer(PenalizePeerRequest) returns (google.protobuf.Empty);
rpc PeerMinBlock(PeerMinBlockRequest) returns (google.protobuf.Empty);
rpc SendMessageByMinBlock(SendMessageByMinBlockRequest) returns (SentPeers);
rpc SendMessageById(SendMessageByIdRequest) returns (SentPeers);
rpc SendMessageToRandomPeers(SendMessageToRandomPeersRequest)
returns (SentPeers);
rpc SendMessageToAll(OutboundMessageData) returns (SentPeers);
rpc SetStatus(StatusData) returns (SetStatusReply);
// Subscribe to receive messages.
// Calling multiple times with a different set of ids starts separate streams.
// It is possible to subscribe to the same set of ids more than once.
rpc Messages(MessagesRequest) returns (stream InboundMessage);
rpc PeerCount(PeerCountRequest) returns (PeerCountReply);
// Notifications about connected (after sub-protocol handshake) or lost peer
rpc Peers(PeersRequest) returns (stream PeersReply);
}

63
remote/ethbackend.proto Normal file
@@ -0,0 +1,63 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package remote;
option go_package = "./remote;remote";
service ETHBACKEND {
rpc Etherbase(EtherbaseRequest) returns (EtherbaseReply);
rpc NetVersion(NetVersionRequest) returns (NetVersionReply);
rpc NetPeerCount(NetPeerCountRequest) returns (NetPeerCountReply);
// Version returns the service version number
rpc Version(google.protobuf.Empty) returns (types.VersionReply);
// ProtocolVersion returns the Ethereum protocol version number (e.g. 66 for ETH66).
rpc ProtocolVersion(ProtocolVersionRequest) returns (ProtocolVersionReply);
// ClientVersion returns the Ethereum client version string using node name convention (e.g. TurboGeth/v2021.03.2-alpha/Linux).
rpc ClientVersion(ClientVersionRequest) returns (ClientVersionReply);
rpc Subscribe(SubscribeRequest) returns (stream SubscribeReply);
}
enum Event {
HEADER = 0;
PENDING_LOGS = 1;
PENDING_BLOCK = 2;
}
message EtherbaseRequest {}
message EtherbaseReply { types.H160 address = 1; }
message NetVersionRequest {}
message NetVersionReply { uint64 id = 1; }
message NetPeerCountRequest {}
message NetPeerCountReply { uint64 count = 1; }
message ProtocolVersionRequest {}
message ProtocolVersionReply { uint64 id = 1; }
message ClientVersionRequest {}
message ClientVersionReply { string nodeName = 1; }
message SubscribeRequest {
Event type = 1;
}
message SubscribeReply {
Event type = 1;
bytes data = 2; // serialized data
}

88
remote/kv.proto Normal file
@@ -0,0 +1,88 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package remote;
option go_package = "./remote;remote";
// Provides methods to access key-value data
service KV {
// Version returns the service version number
rpc Version(google.protobuf.Empty) returns (types.VersionReply);
// Tx exposes read-only transactions for the key-value store
rpc Tx(stream Cursor) returns (stream Pair);
rpc ReceiveStateChanges(google.protobuf.Empty) returns (stream StateChange);
}
enum Op {
FIRST = 0;
FIRST_DUP = 1;
SEEK = 2;
SEEK_BOTH = 3;
CURRENT = 4;
LAST = 6;
LAST_DUP = 7;
NEXT = 8;
NEXT_DUP = 9;
NEXT_NO_DUP = 11;
PREV = 12;
PREV_DUP = 13;
PREV_NO_DUP = 14;
SEEK_EXACT = 15;
SEEK_BOTH_EXACT = 16;
OPEN = 30;
CLOSE = 31;
}
message Cursor {
Op op = 1;
string bucketName = 2;
uint32 cursor = 3;
bytes k = 4;
bytes v = 5;
}
message Pair {
bytes k = 1;
bytes v = 2;
uint32 cursorID = 3;
}
enum Action {
STORAGE = 0; // Change only in the storage
UPSERT = 1; // Change of balance or nonce (and optionally storage)
CODE = 2; // Change of code (and optionally storage)
UPSERT_CODE = 3; // Change in (balance or nonce) and code (and optionally storage)
DELETE = 4; // Account is deleted
}
message StorageChange {
types.H256 location = 1;
bytes data = 2;
}
message AccountChange {
types.H160 address = 1;
uint64 incarnation = 2;
Action action = 3;
bytes data = 4; // nil if there is no UPSERT in action
bytes code = 5; // nil if there is no CODE in action
repeated StorageChange storageChanges = 6;
}
enum Direction {
FORWARD = 0;
UNWIND = 1;
}
message StateChange {
Direction direction = 1;
uint64 blockHeight = 2;
types.H256 blockHash = 3;
repeated AccountChange changes = 4;
}
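
For illustration, a hedged sketch of driving the `Tx` duplex stream from the Rust crate (feature `remotekv`), assuming the server answers `OPEN` with a `Pair` carrying the new cursor id and signals end-of-bucket with an empty key; endpoint and bucket are placeholders, and the `tokio-stream` wrapper is used to turn a channel into the request stream:
```rust
use ethereum_interfaces::remotekv::{kv_client::KvClient, Cursor, Op};
use tokio_stream::wrappers::ReceiverStream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut kv = KvClient::connect("http://127.0.0.1:9090").await?;
    let (ops, rx) = tokio::sync::mpsc::channel(8);
    let mut pairs = kv.tx(ReceiverStream::new(rx)).await?.into_inner();

    // OPEN a read-only cursor over one bucket; the reply carries its id.
    ops.send(Cursor {
        op: Op::Open as i32,
        bucket_name: "PlainState".to_string(),
        ..Default::default()
    })
    .await?;
    let opened = pairs.message().await?.expect("expected cursor id");

    // Walk the bucket with FIRST, then NEXT, until the key comes back empty.
    let mut op = Op::First;
    loop {
        ops.send(Cursor { op: op as i32, cursor: opened.cursor_id, ..Default::default() }).await?;
        let pair = match pairs.message().await? { Some(p) => p, None => break };
        if pair.k.is_empty() { break; }
        println!("key: {} bytes, value: {} bytes", pair.k.len(), pair.v.len());
        op = Op::Next;
    }
    Ok(())
}
```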

40
snapshot_downloader/external_downloader.proto Normal file
@@ -0,0 +1,40 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
option go_package = "./snapshotsync;snapshotsync";
package snapshotsync;
enum SnapshotType {
headers = 0;
bodies = 1;
state = 2;
receipts = 3;
}
service Downloader {
rpc Download (DownloadSnapshotRequest) returns (google.protobuf.Empty) {}
rpc Snapshots (SnapshotsRequest) returns (SnapshotsInfoReply) {}
}
message DownloadSnapshotRequest {
uint64 network_id = 1;
repeated SnapshotType type = 2;
}
message SnapshotsRequest {
uint64 network_id = 1;
}
message SnapshotsInfo {
SnapshotType type = 1;
bool gotInfoByte = 2;
int32 readiness = 3;
uint64 snapshotBlock = 4;
string dbpath = 5;
}
message SnapshotsInfoReply {
repeated SnapshotsInfo info = 1;
}

166
src/lib.rs Normal file
@@ -0,0 +1,166 @@
pub mod types {
use arrayref::array_ref;
tonic::include_proto!("types");
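// Conversion glue between a protobuf hash message ($proto) and an unsigned
// integer type ($u), routed through the matching ethereum-types hash ($h).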
macro_rules! U {
($proto:ty, $h:ty, $u:ty) => {
impl From<$u> for $proto {
fn from(value: $u) -> Self {
Self::from(<$h>::from(<[u8; <$h>::len_bytes()]>::from(value)))
}
}
impl From<$proto> for $u {
fn from(value: $proto) -> Self {
Self::from(<$h>::from(value).0)
}
}
};
}
// to PB
impl From<ethereum_types::H128> for H128 {
fn from(value: ethereum_types::H128) -> Self {
Self {
hi: u64::from_be_bytes(*array_ref!(value, 0, 8)),
lo: u64::from_be_bytes(*array_ref!(value, 8, 8)),
}
}
}
impl From<ethereum_types::H160> for H160 {
fn from(value: ethereum_types::H160) -> Self {
Self {
hi: Some(ethereum_types::H128::from_slice(&value[..16]).into()),
lo: u32::from_be_bytes(*array_ref!(value, 16, 4)),
}
}
}
impl From<ethereum_types::H256> for H256 {
fn from(value: ethereum_types::H256) -> Self {
Self {
hi: Some(ethereum_types::H128::from_slice(&value[..16]).into()),
lo: Some(ethereum_types::H128::from_slice(&value[16..]).into()),
}
}
}
impl From<ethereum_types::H512> for H512 {
fn from(value: ethereum_types::H512) -> Self {
Self {
hi: Some(ethereum_types::H256::from_slice(&value[..32]).into()),
lo: Some(ethereum_types::H256::from_slice(&value[32..]).into()),
}
}
}
// from PB
impl From<H128> for ethereum_types::H128 {
fn from(value: H128) -> Self {
let mut v = [0; Self::len_bytes()];
v[..8].copy_from_slice(&value.hi.to_be_bytes());
v[8..].copy_from_slice(&value.lo.to_be_bytes());
v.into()
}
}
impl From<H160> for ethereum_types::H160 {
fn from(value: H160) -> Self {
type H = ethereum_types::H128;
let mut v = [0; Self::len_bytes()];
v[..H::len_bytes()]
.copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
v[H::len_bytes()..].copy_from_slice(&value.lo.to_be_bytes());
v.into()
}
}
impl From<H256> for ethereum_types::H256 {
fn from(value: H256) -> Self {
type H = ethereum_types::H128;
let mut v = [0; Self::len_bytes()];
v[..H::len_bytes()]
.copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
v[H::len_bytes()..]
.copy_from_slice(H::from(value.lo.unwrap_or_default()).as_fixed_bytes());
v.into()
}
}
impl From<H512> for ethereum_types::H512 {
fn from(value: H512) -> Self {
type H = ethereum_types::H256;
let mut v = [0; Self::len_bytes()];
v[..H::len_bytes()]
.copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
v[H::len_bytes()..]
.copy_from_slice(H::from(value.lo.unwrap_or_default()).as_fixed_bytes());
v.into()
}
}
U!(H128, ethereum_types::H128, ethereum_types::U128);
U!(H256, ethereum_types::H256, ethereum_types::U256);
U!(H512, ethereum_types::H512, ethereum_types::U512);
}
#[cfg(feature = "consensus")]
pub mod consensus {
tonic::include_proto!("consensus");
}
#[cfg(feature = "sentry")]
pub mod sentry {
tonic::include_proto!("sentry");
}
#[cfg(feature = "remotekv")]
pub mod remotekv {
tonic::include_proto!("remote");
}
#[cfg(feature = "snapshotsync")]
pub mod snapshotsync {
tonic::include_proto!("snapshotsync");
}
#[cfg(feature = "txpool")]
pub mod txpool {
tonic::include_proto!("txpool");
tonic::include_proto!("txpool_control");
}
#[cfg(feature = "db")]
pub mod db {
use once_cell::sync::Lazy;
use serde::Deserialize;
use std::collections::HashMap;
#[derive(Deserialize)]
pub struct AutoDupSortConfig {
pub from: usize,
pub to: usize,
}
#[derive(Deserialize)]
pub struct DupSortConfig {
pub auto: Option<AutoDupSortConfig>,
}
#[derive(Deserialize)]
pub struct TableInfo {
pub dup_sort: Option<DupSortConfig>,
}
pub static TABLES: Lazy<HashMap<String, TableInfo>> =
Lazy::new(|| toml::from_str(include_str!("../db_tables.toml")).unwrap());
}
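
A hypothetical usage sketch for the `db` feature; the table name is just one entry from `db_tables.toml`:
```rust
use ethereum_interfaces::db::TABLES;

fn main() {
    // Look up the declared properties of one table.
    let info = &TABLES["PlainState"];
    match info.dup_sort.as_ref().and_then(|d| d.auto.as_ref()) {
        Some(auto) => println!("PlainState: auto-DupSort from {} to {}", auto.from, auto.to),
        None => println!("PlainState: plain table"),
    }
}
```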

27
testing/testing.proto Normal file
@@ -0,0 +1,27 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
package testing;
option go_package = "./testing;testing";
message TestCaseNumber {
uint64 count = 1;
}
message TestReport {
// End of test signal
bool end = 1;
// Indication whether the report is about success of part of the test, or failure
bool success = 2;
string log = 3;
}
service TestDriver {
// Returns the number of available integration test cases in the test driver
rpc TestCaseCount(google.protobuf.Empty) returns (TestCaseNumber);
// Asks the test driver to start the test case with the given number. As the test case progresses, the driver sends reports via the stream.
// The test driver also notifies about the end of the test case via the stream.
rpc StartTestCase(TestCaseNumber) returns (stream TestReport);
}

BIN
turbo-geth-architecture.png Normal file (binary, 175 KiB)

1
turbo-geth.drawio Normal file

File diff suppressed because one or more lines are too long

45
txpool/README.md Normal file
@@ -0,0 +1,45 @@
# txpool interface
The transaction pool is supposed to import and track pending transactions. As such, it should conduct at least two checks:
- Transactions must have a correct nonce
- Gas fees must be covered
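A minimal sketch of those two checks under simplifying assumptions (legacy gas pricing, no replacement-by-fee rules; all names are illustrative):
```rust
use ethereum_types::U256;

struct Account { nonce: u64, balance: U256 }
struct PendingTx { nonce: u64, gas_limit: u64, gas_price: U256, value: U256 }

fn admissible(sender: &Account, tx: &PendingTx) -> bool {
    // 1. Nonce must not be in the past (queued txs may be in the future).
    let nonce_ok = tx.nonce >= sender.nonce;
    // 2. The account must cover worst-case gas fees plus the transferred value.
    let max_cost = tx.gas_price * U256::from(tx.gas_limit) + tx.value;
    nonce_ok && sender.balance >= max_cost
}
```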
## State streaming
For transaction checks to function, the pool must also track balance and nonce for sending accounts.
On import of transactions from an unknown sender, the transaction pool can request balance and nonce at a particular block.
To track existing accounts, the transaction pool connects to the Ethereum client and receives a stream of BlockDiffs. Each of these represents one block, applied or reverted, and contains all the information the transaction pool needs to track its accounts.
For applied blocks:
- Block's hash
- Parent block's hash
- New balances and nonces for all accounts changed in this block
For reverted blocks:
- Reverted block's hash
- New (reverted block's parent) hash
- New parent (reverted block's grandparent) hash
- List of reverted transactions
- Balances and nonces for all accounts changed in the reverted block, at the new (reverted block's parent) state.
BlockDiffs must be streamed in the chain's order, without any gaps. If a BlockDiff's parent does not match the current block hash, the transaction pool must make sure that it is not left in an inconsistent state. One option is to reset the transaction pool, reimport transactions, and re-request state for those senders.
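A hedged sketch of that consistency rule over the `BlockDiff` type generated from [txpool_control.proto](./txpool_control.proto), assuming the Rust crate's `txpool` feature (returning `None` stands in for resetting the pool):
```rust
use ethereum_interfaces::txpool::{block_diff::Diff, BlockDiff};
use ethereum_interfaces::types::H256;

/// Returns the hash the pool is at after the diff, or None when the diff
/// does not connect to the current block and the pool must be reset.
fn advance(current: &Option<H256>, diff: BlockDiff) -> Option<H256> {
    match diff.diff? {
        // An applied block must build directly on the block we are at.
        Diff::Applied(applied) if applied.parent_hash == *current => applied.hash,
        // A revert must undo exactly the block we are at; we move to the
        // new head (the reverted block's parent).
        Diff::Reverted(reverted) if reverted.reverted_hash == *current => reverted.new_hash,
        // Anything else is a gap: reset, reimport txs, re-request state.
        _ => None,
    }
}
```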
## Reorg handling
Simple example:
```
A - D -- E -- F
\
- B -- C
```
The transaction pool is at block C; the canonical chain reorganizes to F.
We backtrack to the common ancestor and apply the new chain, block by block.
The client must send the following BlockDiffs to the txpool, in order:
- revert C to B
- revert B to A
- apply D on A
- apply E on D
- apply F on E

103
txpool/mining.proto Normal file
@@ -0,0 +1,103 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package txpool;
option go_package = "./txpool;txpool";
message OnPendingBlockRequest {}
message OnPendingBlockReply {
bytes rplBlock = 1;
}
message OnMinedBlockRequest {}
message OnMinedBlockReply {
bytes rplBlock = 1;
}
message OnPendingLogsRequest {}
message OnPendingLogsReply {
bytes rplLogs = 1;
}
message GetWorkRequest {}
message GetWorkReply {
string headerHash = 1; // 32 bytes hex encoded current block header pow-hash
string seedHash = 2; // 32 bytes hex encoded seed hash used for DAG
string target = 3; // 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
string blockNumber = 4; // hex encoded block number
}
message SubmitWorkRequest {
bytes blockNonce = 1;
bytes powHash = 2;
bytes digest = 3;
}
message SubmitWorkReply {
bool ok = 1;
}
message SubmitHashRateRequest {
uint64 rate = 1;
bytes id = 2;
}
message SubmitHashRateReply {
bool ok = 1;
}
message HashRateRequest {}
message HashRateReply {
uint64 hashRate = 1;
}
message MiningRequest {}
message MiningReply {
bool enabled = 1;
bool running = 2;
}
service Mining {
// Version returns the service version number
rpc Version(google.protobuf.Empty) returns (types.VersionReply);
// subscribe to pending blocks event
rpc OnPendingBlock(OnPendingBlockRequest) returns (stream OnPendingBlockReply);
// subscribe to mined blocks event
rpc OnMinedBlock(OnMinedBlockRequest) returns (stream OnMinedBlockReply);
// subscribe to pending blocks event
rpc OnPendingLogs(OnPendingLogsRequest) returns (stream OnPendingLogsReply);
// GetWork returns a work package for external miner.
//
// The work package consists of 3 strings:
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
rpc GetWork(GetWorkRequest) returns (GetWorkReply);
// SubmitWork can be used by external miner to submit their POW solution.
// It returns an indication if the work was accepted.
// Note that an invalid solution, stale work, or non-existent work will return false.
rpc SubmitWork(SubmitWorkRequest) returns (SubmitWorkReply);
// SubmitHashRate can be used for remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
rpc SubmitHashRate(SubmitHashRateRequest) returns (SubmitHashRateReply);
// HashRate returns the current hashrate for local CPU miner and remote miner.
rpc HashRate(HashRateRequest) returns (HashRateReply);
// Mining returns an indication of whether this node is currently mining and its mining configuration
rpc Mining(MiningRequest) returns (MiningReply);
}

61
txpool/txpool.proto Normal file
@@ -0,0 +1,61 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package txpool;
option go_package = "./txpool;txpool";
message TxHashes { repeated types.H256 hashes = 1; }
message AddRequest { repeated bytes rlpTxs = 1; }
enum ImportResult {
SUCCESS = 0;
ALREADY_EXISTS = 1;
FEE_TOO_LOW = 2;
STALE = 3;
INVALID = 4;
INTERNAL_ERROR = 5;
}
message AddReply { repeated ImportResult imported = 1; repeated string errors = 2; }
message TransactionsRequest { repeated types.H256 hashes = 1; }
message TransactionsReply { repeated bytes rlpTxs = 1; }
message OnAddRequest {}
message OnAddReply {
repeated bytes rplTxs = 1;
}
message AllRequest {}
message AllReply {
enum Type {
PENDING = 0; // All currently processable transactions
QUEUED = 1; // Queued but non-processable transactions
}
message Tx {
Type type = 1;
bytes sender = 2;
bytes rlpTx = 3;
}
repeated Tx txs = 1;
}
service Txpool {
// Version returns the service version number
rpc Version(google.protobuf.Empty) returns (types.VersionReply);
// Preserves incoming order; the amount may change because unknown hashes are omitted
rpc FindUnknown(TxHashes) returns (TxHashes);
// Expecting signed transactions. Preserves incoming order and amount
// Adding txs as local (use P2P to add remote txs)
rpc Add(AddRequest) returns (AddReply);
// Preserves incoming order and amount; if some transaction doesn't exist in the pool, nil is returned in its slot
rpc Transactions(TransactionsRequest) returns (TransactionsReply);
// returns all transactions from tx pool
rpc All(AllRequest) returns (AllReply);
// subscribe to new transactions add event
rpc OnAdd(OnAddRequest) returns (stream OnAddReply);
}

57
txpool/txpool_control.proto Normal file
@@ -0,0 +1,57 @@
syntax = "proto3";
import "google/protobuf/empty.proto";
import "types/types.proto";
package txpool_control;
option go_package = "./txpool;txpool";
message AccountInfoRequest {
types.H256 block_hash = 1;
types.H160 account = 2;
}
message AccountInfoReply {
types.H256 balance = 1;
uint64 nonce = 2;
}
message BlockStreamRequest {
oneof start_with {
google.protobuf.Empty latest = 1;
types.H256 block_hash = 2;
}
}
message AccountInfo {
types.H160 address = 1;
types.H256 balance = 2;
uint64 nonce = 3;
}
message AppliedBlock {
types.H256 hash = 1;
types.H256 parent_hash = 2;
repeated AccountInfo changed_accounts = 3;
}
message RevertedBlock {
types.H256 reverted_hash = 1;
repeated bytes reverted_transactions = 2;
types.H256 new_hash = 3;
types.H256 new_parent = 4;
repeated AccountInfo reverted_accounts = 5;
}
message BlockDiff {
oneof diff {
AppliedBlock applied = 1;
RevertedBlock reverted = 2;
}
}
service TxpoolControl {
rpc AccountInfo(AccountInfoRequest) returns (AccountInfoReply);
rpc BlockStream(BlockStreamRequest) returns (stream BlockDiff);
}

46
types/types.proto Normal file
@@ -0,0 +1,46 @@
syntax = "proto3";
import "google/protobuf/descriptor.proto";
package types;
option go_package = "./types;types";
/* Service-level versioning shall use a 3-part version number (M.m.p) following semver rules */
/* 1. MAJOR version (M): increment when you make incompatible changes */
/* 2. MINOR version (m): increment when you add functionality in backward compatible manner */
/* 3. PATCH version (p): increment when you make backward compatible bug fixes */
// Extensions of file-level options for service versioning: should *not* be modified
extend google.protobuf.FileOptions {
uint32 service_major_version = 50001;
uint32 service_minor_version = 50002;
uint32 service_patch_version = 50003;
}
message H128 {
uint64 hi = 1;
uint64 lo = 2;
}
message H160 {
H128 hi = 1;
uint32 lo = 2;
}
message H256 {
H128 hi = 1;
H128 lo = 2;
}
message H512 {
H256 hi = 1;
H256 lo = 2;
}
// Reply message containing the current service version on the service side
message VersionReply {
uint32 major = 1;
uint32 minor = 2;
uint32 patch = 3;
}