erigon-pulse/txpool/pool_fuzz_test.go

//go:build gofuzzbeta
// +build gofuzzbeta

package txpool

import (
"bytes"
"context"
"encoding/binary"
"testing"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/common/u256"
"github.com/ledgerwatch/erigon-lib/gointerfaces"
"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcache"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon-lib/rlp"
"github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// https://blog.golang.org/fuzz-beta
// golang.org/s/draft-fuzzing-design
//gotip doc testing
//gotip doc testing.F
//gotip doc testing.F.Add
//gotip doc testing.F.Fuzz
// gotip test -trimpath -v -fuzz=Fuzz -fuzztime=10s ./txpool
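//
// For example, to fuzz only the FuzzOnNewBlocks target below:
// gotip test -trimpath -v -fuzz=FuzzOnNewBlocks -fuzztime=10s ./txpool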
func init() {
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
}
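
// FuzzTwoQueue feeds random 5-bit SubPoolMarker patterns into a PendingSubPool
// and into BaseFee/Queued SubPools and checks their invariants: best/worst
// indices stay in sync, Best/PopBest yields markers in non-increasing order and
// Worst/PopWorst in non-decreasing order.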
func FuzzTwoQueue(f *testing.F) {
f.Add([]uint8{0b11000, 0b00101, 0b000111})
f.Add([]uint8{0b10101, 0b11110, 0b11101, 0b10001})
f.Fuzz(func(t *testing.T, in []uint8) {
t.Parallel()
assert := assert.New(t)
{
sub := NewPendingSubPool(PendingSubPool, 1024)
for _, i := range in {
sub.UnsafeAdd(&metaTx{subPool: SubPoolMarker(i & 0b11111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
}
sub.EnforceWorstInvariants()
sub.EnforceBestInvariants()
assert.Equal(len(in), sub.best.Len())
assert.Equal(len(in), sub.worst.Len())
assert.Equal(len(in), sub.Len())
var prevBest *uint8
for i := range sub.best {
current := uint8(sub.best[i].subPool)
if prevBest != nil {
assert.LessOrEqual(current, *prevBest)
}
assert.Equal(i, sub.best[i].bestIndex)
prevBest = &current
}
}
{
sub := NewSubPool(BaseFeeSubPool, 1024)
for _, i := range in {
sub.Add(&metaTx{subPool: SubPoolMarker(i & 0b11111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
}
assert.Equal(len(in), sub.best.Len())
assert.Equal(len(in), sub.worst.Len())
assert.Equal(len(in), sub.Len())
for i := range *sub.best {
assert.Equal(i, (*sub.best)[i].bestIndex)
}
for i := range *sub.worst {
assert.Equal(i, (*sub.worst)[i].worstIndex)
}
var prevBest *uint8
i := sub.Len()
for sub.Len() > 0 {
best := uint8(sub.Best().subPool)
assert.Equal(best, uint8(sub.PopBest().subPool))
if prevBest != nil {
assert.LessOrEqual(best, *prevBest)
}
prevBest = &best
i--
}
assert.Zero(i)
assert.Zero(sub.Len())
assert.Zero(sub.best.Len())
assert.Zero(sub.worst.Len())
}
{
sub := NewSubPool(QueuedSubPool, 1024)
for _, i := range in {
sub.Add(&metaTx{subPool: SubPoolMarker(i & 0b11111), Tx: &TxSlot{nonce: 1, value: *uint256.NewInt(1)}})
}
var prev *uint8
i := sub.Len()
for sub.Len() > 0 {
worst := uint8(sub.Worst().subPool)
assert.Equal(worst, uint8(sub.PopWorst().subPool))
if prev != nil {
assert.GreaterOrEqual(worst, *prev)
}
prev = &worst
i--
}
assert.Zero(i)
assert.Zero(sub.Len())
assert.Zero(sub.best.Len())
assert.Zero(sub.worst.Len())
}
})
}
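
// u64Slice/u8Slice/u16Slice/u256Slice decode raw fuzz bytes into numeric
// slices; the byte-wide variants additionally reduce values mod 32 so the
// generated nonces, tips, fee caps and values stay small.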
func u64Slice(in []byte) ([]uint64, bool) {
if len(in) < 8 {
return nil, false
}
res := make([]uint64, len(in)/8)
for i := 0; i < len(res); i++ {
res[i] = binary.BigEndian.Uint64(in[i*8:])
}
return res, true
}
func u8Slice(in []byte) ([]uint64, bool) {
if len(in) < 1 {
return nil, false
}
res := make([]uint64, len(in))
for i := 0; i < len(res); i++ {
res[i] = uint64(in[i] % 32)
}
return res, true
}
func u16Slice(in []byte) ([]uint64, bool) {
if len(in) < 2 {
return nil, false
}
res := make([]uint64, len(in)/2)
for i := 0; i < len(res); i++ {
res[i] = uint64(binary.BigEndian.Uint16(in[i*2:]))
}
return res, true
}
func u256Slice(in []byte) ([]uint256.Int, bool) {
if len(in) < 1 {
return nil, false
}
res := make([]uint256.Int, len(in))
for i := 0; i < len(res); i++ {
res[i].SetUint64(uint64(in[i] % 32))
}
return res, true
}
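
// parseSenders interprets every pair of input bytes as one sender: a non-zero
// nonce (in[i]%8, bumped to 1 when zero) and a small balance (in[i+1]).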
func parseSenders(in []byte) (nonces []uint64, balances []uint256.Int) {
for i := 0; i < len(in)-(1+1-1); i += 1 + 1 {
nonce := uint64(in[i] % 8)
if nonce == 0 {
nonce = 1
}
nonces = append(nonces, nonce)
balances = append(balances, *uint256.NewInt(uint64(in[i+1])))
}
return
}
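
// parseTxs interprets every triple of input bytes as one tx: a non-zero nonce,
// a tip and a value.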
func parseTxs(in []byte) (nonces, tips []uint64, values []uint256.Int) {
for i := 0; i < len(in)-(1+1+1-1); i += 1 + 1 + 1 {
nonce := uint64(in[i])
if nonce == 0 {
nonce = 1
}
nonces = append(nonces, nonce)
tips = append(tips, uint64(in[i+1]))
values = append(values, *uint256.NewInt(uint64(in[i+1+1])))
}
return
}
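
// poolsFromFuzzBytes turns the raw fuzz inputs into a consistent scenario:
// sender infos and senderID mappings keyed by synthetic 20-byte addresses, plus
// a TxSlots set whose entries are serialized with fakeRlpTx and re-parsed by
// ParseTransaction so that every tx carries a valid, unique idHash.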
func poolsFromFuzzBytes(rawTxNonce, rawValues, rawTips, rawFeeCap, rawSender []byte) (sendersInfo map[uint64]*sender, senderIDs map[string]uint64, txs TxSlots, ok bool) {
if len(rawTxNonce) < 1 || len(rawValues) < 1 || len(rawTips) < 1 || len(rawFeeCap) < 1 || len(rawSender) < 1+1 {
return nil, nil, txs, false
}
senderNonce, senderBalance := parseSenders(rawSender)
txNonce, ok := u8Slice(rawTxNonce)
if !ok {
return nil, nil, txs, false
}
feeCap, ok := u8Slice(rawFeeCap)
if !ok {
return nil, nil, txs, false
}
tips, ok := u8Slice(rawTips)
if !ok {
return nil, nil, txs, false
}
values, ok := u256Slice(rawValues)
if !ok {
return nil, nil, txs, false
}
sendersInfo = map[uint64]*sender{}
senderIDs = map[string]uint64{}
senders := make(Addresses, 20*len(senderNonce))
for i := 0; i < len(senderNonce); i++ {
senderID := uint64(i + 1) //non-zero expected
binary.BigEndian.PutUint64(senders.At(i%senders.Len()), senderID)
sendersInfo[senderID] = newSender(senderNonce[i], senderBalance[i%len(senderBalance)])
senderIDs[string(senders.At(i%senders.Len()))] = senderID
}
txs.txs = make([]*TxSlot, len(txNonce))
parseCtx := NewTxParseContext(chain.MainnetRules, *u256.N1)
parseCtx.WithSender(false)
for i := range txNonce {
txs.txs[i] = &TxSlot{
nonce: txNonce[i],
value: values[i%len(values)],
tip: tips[i%len(tips)],
feeCap: feeCap[i%len(feeCap)],
}
txRlp := fakeRlpTx(txs.txs[i], senders.At(i%senders.Len()))
_, err := parseCtx.ParseTransaction(txRlp, 0, txs.txs[i], nil)
if err != nil {
panic(err)
}
txs.senders = append(txs.senders, senders.At(i%senders.Len())...)
txs.isLocal = append(txs.isLocal, true)
}
return sendersInfo, senderIDs, txs, true
}
// fakeRlpTx builds a minimal dynamic-fee tx envelope for slot; pass anything identifying the tx as `data` to make the hash unique
func fakeRlpTx(slot *TxSlot, data []byte) []byte {
dataLen := rlp.U64Len(1) + //chainID
rlp.U64Len(slot.nonce) + rlp.U64Len(slot.tip) + rlp.U64Len(slot.feeCap) +
rlp.U64Len(0) + // gas
rlp.StringLen(0) + // dest addr
rlp.U256Len(&slot.value) +
rlp.StringLen(len(data)) + // data
rlp.ListPrefixLen(0) + //access list
+3 // v,r,s
buf := make([]byte, 1+rlp.ListPrefixLen(dataLen)+dataLen)
buf[0] = byte(DynamicFeeTxType)
p := 1
p += rlp.EncodeListPrefix(dataLen, buf[p:])
p += rlp.EncodeU64(1, buf[p:])
p += rlp.EncodeU64(slot.nonce, buf[p:])
p += rlp.EncodeU64(slot.tip, buf[p:])
p += rlp.EncodeU64(slot.feeCap, buf[p:])
p += rlp.EncodeU64(0, buf[p:]) //gas
p += rlp.EncodeString([]byte{}, buf[p:]) //destination addr
bb := bytes.NewBuffer(buf[p:p])
_ = slot.value.EncodeRLP(bb)
p += rlp.U256Len(&slot.value)
p += rlp.EncodeString(data, buf[p:]) //data
p += rlp.EncodeListPrefix(0, buf[p:]) // access list
p += rlp.EncodeU64(1, buf[p:]) //v
p += rlp.EncodeU64(1, buf[p:]) //r
p += rlp.EncodeU64(1, buf[p:]) //s
return buf[:]
}
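
// iterateSubPoolUnordered visits every tx of a sub-pool by walking the
// underlying best slice directly, without popping elements.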
func iterateSubPoolUnordered(subPool *SubPool, f func(tx *metaTx)) {
for i := 0; i < subPool.best.Len(); i++ {
f((*subPool.best)[i])
}
}
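
// splitDataset cuts the generated TxSlots into four equal parts, keeping txs,
// isLocal and the 20-byte-per-sender senders slice aligned; the parts are used
// below to simulate blocks on two forks, mined txs and a batch of p2p-received
// txs.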
func splitDataset(in TxSlots) (TxSlots, TxSlots, TxSlots, TxSlots) {
p1, p2, p3, p4 := TxSlots{}, TxSlots{}, TxSlots{}, TxSlots{}
l := len(in.txs) / 4
p1.txs = in.txs[:l]
p1.isLocal = in.isLocal[:l]
p1.senders = in.senders[:l*20]
p2.txs = in.txs[l : 2*l]
p2.isLocal = in.isLocal[l : 2*l]
p2.senders = in.senders[l*20 : 2*l*20]
p3.txs = in.txs[2*l : 3*l]
p3.isLocal = in.isLocal[2*l : 3*l]
p3.senders = in.senders[2*l*20 : 3*l*20]
p4.txs = in.txs[3*l : 4*l]
p4.isLocal = in.isLocal[3*l : 4*l]
p4.senders = in.senders[3*l*20 : 4*l*20]
return p1, p2, p3, p4
}
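
// FuzzOnNewBlocks drives the pool through a sequence of OnNewBlock calls
// (apply block, mine txs, unwind, switch fork) plus a batch of remote txs,
// re-checking the sub-pool invariants and pending-hash notifications after
// every step, and finally verifies that the pool state survives a flush to the
// DB and a reload into a fresh pool.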
func FuzzOnNewBlocks(f *testing.F) {
var u64 = [1 * 4]byte{1}
var senderAddr = [1 + 1 + 1]byte{1}
f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(12))
f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(14))
f.Add(u64[:], u64[:], u64[:], u64[:], senderAddr[:], uint8(123))
f.Fuzz(func(t *testing.T, txNonce, values, tips, feeCap, senderAddr []byte, pendingBaseFee1 uint8) {
//t.Parallel()
ctx := context.Background()
pendingBaseFee := uint64(pendingBaseFee1%16 + 1)
if pendingBaseFee == 0 {
t.Skip()
}
senders, senderIDs, txs, ok := poolsFromFuzzBytes(txNonce, values, tips, feeCap, senderAddr)
if !ok {
t.Skip()
}
assert, require := assert.New(t), require.New(t)
err := txs.Valid()
assert.NoError(err)
var prevHashes Hashes
ch := make(chan Hashes, 100)
db := mdbx.NewMDBX(log.New()).InMem().WithTablessCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).MustOpen()
t.Cleanup(db.Close)
coreDB := memdb.NewTestDB(t)
cfg := DefaultConfig
sendersCache := kvcache.New(kvcache.DefaultCoherentConfig)
pool, err := New(ch, coreDB, cfg, sendersCache, chain.MainnetRules, *u256.N1)
assert.NoError(err)
pool.senders.senderIDs = senderIDs
for addr, id := range senderIDs {
pool.senders.senderID2Addr[id] = addr
}
pool.senders.senderID = uint64(len(senderIDs))
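
// check asserts the sub-pool invariants after each step: size limits, worst/best
// marker ordering, that the NoNonceGaps and EnoughFeeCap* bits are consistent
// with the sender nonce and base fee, membership in pool.all and pool.byHash,
// no duplicate senderID+nonce across sub-pools, and that mined txs are removed.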
check := func(unwindTxs, minedTxs TxSlots, msg string) {
pending, baseFee, queued := pool.pending, pool.baseFee, pool.queued
best, worst := pending.Best(), pending.Worst()
assert.LessOrEqual(pending.Len(), cfg.PendingSubPoolLimit)
assert.False(worst != nil && best == nil, msg)
assert.False(worst == nil && best != nil, msg)
if worst != nil && worst.subPool < 0b11110 {
t.Fatalf("pending worst too small %b", worst.subPool)
}
for _, tx := range pending.best {
i := tx.Tx
if tx.subPool&NoNonceGaps > 0 {
assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg, i.senderID)
}
if tx.subPool&EnoughBalance > 0 {
//assert.True(tx.SenderHasEnoughBalance)
}
if tx.subPool&EnoughFeeCapProtocol > 0 {
assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.feeCap, msg)
}
if tx.subPool&EnoughFeeCapBlock > 0 {
assert.LessOrEqual(pendingBaseFee, tx.Tx.feeCap, msg)
}
// side data structures must have all txs
assert.True(pool.all.has(tx), msg)
_, ok = pool.byHash[string(i.idHash[:])]
assert.True(ok)
// pools can't have more than 1 tx with same SenderID+Nonce
iterateSubPoolUnordered(baseFee, func(mtx2 *metaTx) {
tx2 := mtx2.Tx
assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce, msg)
})
iterateSubPoolUnordered(queued, func(mtx2 *metaTx) {
tx2 := mtx2.Tx
assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce, msg)
})
}
best, worst = baseFee.Best(), baseFee.Worst()
assert.False(worst != nil && best == nil, msg)
assert.False(worst == nil && best != nil, msg)
assert.LessOrEqual(baseFee.Len(), cfg.BaseFeeSubPoolLimit, msg)
if worst != nil && worst.subPool < 0b11100 {
t.Fatalf("baseFee worst too small %b", worst.subPool)
}
iterateSubPoolUnordered(baseFee, func(tx *metaTx) {
i := tx.Tx
if tx.subPool&NoNonceGaps > 0 {
assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg)
}
if tx.subPool&EnoughBalance != 0 {
//assert.True(tx.SenderHasEnoughBalance, msg)
}
if tx.subPool&EnoughFeeCapProtocol > 0 {
assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.feeCap, msg)
}
if tx.subPool&EnoughFeeCapBlock > 0 {
assert.LessOrEqual(pendingBaseFee, tx.Tx.feeCap, msg)
}
assert.True(pool.all.has(tx), msg)
_, ok = pool.byHash[string(i.idHash[:])]
assert.True(ok, msg)
})
best, worst = queued.Best(), queued.Worst()
assert.LessOrEqual(queued.Len(), cfg.QueuedSubPoolLimit)
assert.False(worst != nil && best == nil, msg)
assert.False(worst == nil && best != nil, msg)
if worst != nil && worst.subPool < 0b10000 {
t.Fatalf("queued worst too small %b", worst.subPool)
}
iterateSubPoolUnordered(queued, func(tx *metaTx) {
i := tx.Tx
if tx.subPool&NoNonceGaps > 0 {
assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg, i.senderID, senders[i.senderID].nonce)
}
if tx.subPool&EnoughBalance > 0 {
//assert.True(tx.SenderHasEnoughBalance, msg)
}
if tx.subPool&EnoughFeeCapProtocol > 0 {
assert.LessOrEqual(calcProtocolBaseFee(pendingBaseFee), tx.Tx.feeCap, msg)
}
if tx.subPool&EnoughFeeCapBlock > 0 {
assert.LessOrEqual(pendingBaseFee, tx.Tx.feeCap, msg)
}
assert.True(pool.all.has(tx), "%s, %d, %x", msg, tx.Tx.nonce, tx.Tx.idHash)
_, ok = pool.byHash[string(i.idHash[:])]
assert.True(ok, msg)
assert.GreaterOrEqual(tx.Tx.feeCap, pool.cfg.MinFeeCap)
})
// all txs in side data structures must be in some queue
for _, txn := range pool.byHash {
require.True(txn.bestIndex >= 0, msg)
assert.True(txn.worstIndex >= 0, msg)
}
for id := range senders {
//assert.True(senders[i].all.Len() > 0)
pool.all.ascend(id, func(mt *metaTx) bool {
require.True(mt.worstIndex >= 0, msg)
assert.True(mt.bestIndex >= 0, msg)
return true
})
}
// mined txs must be removed
for i := range minedTxs.txs {
_, ok = pool.byHash[string(minedTxs.txs[i].idHash[:])]
assert.False(ok, msg)
}
if queued.Len() > 3 {
// Less func must be transitive (choose 3 semi-random elements)
i := queued.Len() - 1
a, b, c := (*queued.best)[i], (*queued.best)[i-1], (*queued.best)[i-2]
if a.Less(b) && b.Less(c) {
assert.True(a.Less(c))
}
}
}
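
// checkNotify asserts that every hash announced on ch belongs to an unwound tx
// and never to a mined one; if nothing was announced, the set of pending
// hashes must not have grown.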
checkNotify := func(unwindTxs, minedTxs TxSlots, msg string) {
pending, baseFee, queued := pool.pending, pool.baseFee, pool.queued
_, _ = baseFee, queued
select {
case newHashes := <-ch:
//assert.Equal(len(txs1.txs), newHashes.Len())
assert.Greater(len(newHashes), 0)
for i := 0; i < newHashes.Len(); i++ {
foundInUnwind := false
foundInMined := false
newHash := newHashes.At(i)
for j := range unwindTxs.txs {
if bytes.Equal(unwindTxs.txs[j].idHash[:], newHash) {
foundInUnwind = true
break
}
}
for j := range minedTxs.txs {
if bytes.Equal(minedTxs.txs[j].idHash[:], newHash) {
foundInMined = true
break
}
}
assert.True(foundInUnwind, msg)
assert.False(foundInMined, msg)
}
default: // no notifications - the pool must be unchanged, or has only dropped txs
pendingHashes := copyHashes(pending)
require.Zero(extractNewHashes(pendingHashes, prevHashes).Len())
}
prevHashes = copyHashes(pending)
_ = prevHashes
}
//TODO: check that id=>addr and addr=>id mappings have same len
tx, err := db.BeginRw(ctx)
require.NoError(err)
defer tx.Rollback()
// start blocks from 0 and use the empty hash, so that kvcache also works on this
h1, h22 := gointerfaces.ConvertHashToH256([32]byte{}), gointerfaces.ConvertHashToH256([32]byte{22})
var txID uint64
_ = coreDB.View(ctx, func(tx kv.Tx) error {
txID = tx.ViewID()
return nil
})
change := &remote.StateChangeBatch{
DatabaseViewID: txID,
PendingBlockBaseFee: pendingBaseFee,
ChangeBatch: []*remote.StateChange{
{BlockHeight: 0, BlockHash: h1},
},
}
for id, sender := range senders {
var addr [20]byte
copy(addr[:], pool.senders.senderID2Addr[id])
v := make([]byte, EncodeSenderLengthForStorage(sender.nonce, sender.balance))
EncodeSender(sender.nonce, sender.balance, v)
change.ChangeBatch[0].Changes = append(change.ChangeBatch[0].Changes, &remote.AccountChange{
Action: remote.Action_UPSERT,
Address: gointerfaces.ConvertAddressToH160(addr),
Data: v,
})
}
// go to first fork
txs1, txs2, p2pReceived, txs3 := splitDataset(txs)
err = pool.OnNewBlock(ctx, change, txs1, TxSlots{}, tx)
assert.NoError(err)
check(txs1, TxSlots{}, "fork1")
checkNotify(txs1, TxSlots{}, "fork1")
_, _, _ = p2pReceived, txs2, txs3
change = &remote.StateChangeBatch{
DatabaseViewID: txID,
PendingBlockBaseFee: pendingBaseFee,
ChangeBatch: []*remote.StateChange{
{BlockHeight: 1, BlockHash: h1},
},
}
err = pool.OnNewBlock(ctx, change, TxSlots{}, txs2, tx)
check(TxSlots{}, txs2, "fork1 mined")
checkNotify(TxSlots{}, txs2, "fork1 mined")
// unwind everything and switch to the new fork (the mined txs now need to be unwound)
change = &remote.StateChangeBatch{
DatabaseViewID: txID,
PendingBlockBaseFee: pendingBaseFee,
ChangeBatch: []*remote.StateChange{
{BlockHeight: 0, BlockHash: h1, Direction: remote.Direction_UNWIND},
},
}
err = pool.OnNewBlock(ctx, change, txs2, TxSlots{}, tx)
assert.NoError(err)
check(txs2, TxSlots{}, "fork2")
checkNotify(txs2, TxSlots{}, "fork2")
change = &remote.StateChangeBatch{
DatabaseViewID: txID,
PendingBlockBaseFee: pendingBaseFee,
ChangeBatch: []*remote.StateChange{
{BlockHeight: 1, BlockHash: h22},
},
}
err = pool.OnNewBlock(ctx, change, TxSlots{}, txs3, tx)
assert.NoError(err)
check(TxSlots{}, txs3, "fork2 mined")
checkNotify(TxSlots{}, txs3, "fork2 mined")
// add some remote txs from p2p
pool.AddRemoteTxs(ctx, p2pReceived)
err = pool.processRemoteTxs(ctx)
assert.NoError(err)
check(p2pReceived, TxSlots{}, "p2pmsg1")
checkNotify(p2pReceived, TxSlots{}, "p2pmsg1")
err = pool.flushLocked(tx) // we don't test eviction here, because a dedicated test exists
require.NoError(err)
check(p2pReceived, TxSlots{}, "after_flush")
//checkNotify(p2pReceived, TxSlots{}, "after_flush")
p2, err := New(ch, coreDB, DefaultConfig, sendersCache, chain.MainnetRules, *u256.N1)
assert.NoError(err)
p2.senders = pool.senders // senders are not persisted
err = coreDB.View(ctx, func(coreTx kv.Tx) error { return p2.fromDB(ctx, tx, coreTx) })
require.NoError(err)
for _, txn := range p2.byHash {
assert.Nil(txn.Tx.rlp)
}
//todo: check that after load from db tx linked to same senderAddr
check(txs2, TxSlots{}, "fromDB")
//checkNotify(txs2, TxSlots{}, "fromDB")
assert.Equal(pool.senders.senderID, p2.senders.senderID)
assert.Equal(pool.lastSeenBlock.Load(), p2.lastSeenBlock.Load())
assert.Equal(pool.pending.Len(), p2.pending.Len())
assert.Equal(pool.baseFee.Len(), p2.baseFee.Len())
require.Equal(pool.queued.Len(), p2.queued.Len())
assert.Equal(pool.pendingBaseFee.Load(), p2.pendingBaseFee.Load())
})
}
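
// copyHashes flattens the idHashes of the pending sub-pool into a single Hashes slice.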
func copyHashes(p *PendingPool) (hashes Hashes) {
for i := range p.best {
hashes = append(hashes, p.best[i].Tx.idHash[:]...)
}
return hashes
}
// extractNewHashes extracts from h1 the hashes which do not exist in h2
func extractNewHashes(h1, h2 Hashes) (result Hashes) {
for i := 0; i < h1.Len(); i++ {
found := false
for j := 0; j < h2.Len(); j++ {
if bytes.Equal(h1.At(i), h2.At(j)) {
found = true
break
}
}
if !found {
result = append(result, h1.At(i)...)
}
}
return result
}