fixes for many bugs

alex.sharov 2021-08-06 10:36:44 +07:00
parent e965c0981d
commit 922e21e8a1
2 changed files with 82 additions and 78 deletions

View File

@@ -245,7 +245,7 @@ func onNewTxs(senderInfo map[uint64]*senderInfo, newTxs TxSlots, protocolBaseFee
 		//TODO: also check if sender is in list of local-senders
 		i.SubPool |= IsLocal
 	}
-	delete(byHash, string(i.Tx.idHash[:]))
+	byHash[string(i.Tx.idHash[:])] = i
 })
 for i := range senderInfo {
@@ -350,17 +350,14 @@ func onNewBlock(senderInfo map[uint64]*senderInfo, unwindTxs TxSlots, minedTxs [
 	// they effectively lose their priority over the "remote" transactions. In order to prevent that,
 	// somehow the fact that certain transactions were local needs to be remembered for some
 	// time (up to some "immutability threshold").
-	if len(unwindTxs.txs) > 0 {
-		//TODO: restore isLocal flag in unwindTxs
-		unsafeAddToPool(senderInfo, unwindTxs, pending, func(i *MetaTx) {
-			//fmt.Printf("add: %d,%d\n", i.Tx.senderID, i.Tx.nonce)
-			if _, ok := localsHistory.Get(i.Tx.idHash); ok {
-				//TODO: also check if sender is in list of local-senders
-				i.SubPool |= IsLocal
-			}
-			byHash[string(i.Tx.idHash[:])] = i
-		})
-	}
+	unsafeAddToPool(senderInfo, unwindTxs, pending, func(i *MetaTx) {
+		//fmt.Printf("add: %d,%d\n", i.Tx.senderID, i.Tx.nonce)
+		if _, ok := localsHistory.Get(i.Tx.idHash); ok {
+			//TODO: also check if sender is in list of local-senders
+			i.SubPool |= IsLocal
+		}
+		byHash[string(i.Tx.idHash[:])] = i
+	})
 	for i := range senderInfo {
 		// TODO: aggregate changed senders before calling this func
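The localsHistory.Get call above is the mechanism behind the "immutability threshold" comment: remembering which tx hashes were local, so the IsLocal bit can be restored when those txs come back in an unwind. A minimal sketch of that idea, assuming a bounded LRU cache keyed by tx id hash (the cache library, bit value, and window size are assumptions, not from this diff):

package main

import lru "github.com/hashicorp/golang-lru"

const IsLocal = 0b00001 // assumed bit value; the diff only uses the name

type slimMetaTx struct {
	idHash  [32]byte
	subPool uint8
}

// restoreLocalFlag re-marks an unwound tx as local if its hash is still
// inside the LRU window standing in for the "immutability threshold".
func restoreLocalFlag(history *lru.Cache, mt *slimMetaTx) {
	if _, ok := history.Get(mt.idHash); ok {
		mt.subPool |= IsLocal
	}
}

func main() {
	history, _ := lru.New(1024) // window size is an assumption
	mt := &slimMetaTx{}
	history.Add(mt.idHash, struct{}{}) // recorded as local earlier
	restoreLocalFlag(history, mt)      // restores the IsLocal bit
}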
@@ -442,6 +439,9 @@ func unsafeAddToPool(senderInfo map[uint64]*senderInfo, unwindTxs TxSlots, to *S
 			continue
 		}
 	}
+	//if sender.nonce > tx.nonce {
+	//	continue
+	//}
 	beforeAdd(mt)
 	sender.txNonce2Tx.ReplaceOrInsert(&nonce2TxItem{mt})
 	to.UnsafeAdd(mt, PendingSubPool)
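For context, txNonce2Tx is a google/btree (already in the test file's imports below) ordered by nonce, and ReplaceOrInsert is what keeps at most one tx per sender and nonce, an invariant the fuzz test later asserts. A minimal sketch with simplified types:

package main

import "github.com/google/btree"

// nonce2TxItem orders one sender's txs by nonce; payload fields omitted.
type nonce2TxItem struct{ nonce uint64 }

func (i *nonce2TxItem) Less(than btree.Item) bool {
	return i.nonce < than.(*nonce2TxItem).nonce
}

func main() {
	idx := btree.New(32) // degree 32 is an arbitrary choice
	idx.ReplaceOrInsert(&nonce2TxItem{nonce: 5})
	idx.ReplaceOrInsert(&nonce2TxItem{nonce: 5}) // same nonce: replaced, not duplicated
	println(idx.Len()) // 1
}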
@@ -449,7 +449,7 @@
 }

 func onSenderChange(sender *senderInfo, protocolBaseFee, blockBaseFee uint64) {
-	prevNonce := -1
+	prevNonce := int(sender.nonce)
 	accumulatedSenderSpent := uint256.NewInt(0)
 	sender.txNonce2Tx.Ascend(func(i btree.Item) bool {
 		it := i.(*nonce2TxItem)
@@ -459,12 +459,13 @@ func onSenderChange(sender *senderInfo, protocolBaseFee, blockBaseFee uint64) {
 		needBalance.Mul(uint256.NewInt(it.MetaTx.Tx.gas), uint256.NewInt(it.MetaTx.Tx.feeCap))
 		needBalance.Add(&it.MetaTx.NeedBalance, &it.MetaTx.Tx.value)
 		it.MetaTx.SenderHasEnoughBalance = sender.balance.Gt(needBalance) || sender.balance.Eq(needBalance)
 		// 1. Minimum fee requirement. Set to 1 if feeCap of the transaction is no less than in-protocol
 		// parameter of minimal base fee. Set to 0 if feeCap is less than minimum base fee, which means
 		// this transaction will never be included into this particular chain.
 		it.MetaTx.SubPool &^= EnoughFeeCapProtocol
 		if it.MetaTx.Tx.feeCap >= protocolBaseFee {
+			//fmt.Printf("alex1: %d,%d,%d,%d\n", it.MetaTx.NeedBalance.Uint64(), it.MetaTx.Tx.gas, it.MetaTx.Tx.feeCap, it.MetaTx.Tx.value.Uint64())
+			//fmt.Printf("alex2: %d,%t\n", sender.balance.Uint64(), it.MetaTx.SenderHasEnoughBalance)
 			it.MetaTx.SubPool |= EnoughFeeCapProtocol
 		}
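The &^= then |= pair above is the clear-then-set pattern used for every rule bit in this function: clear unconditionally, then re-set only if the condition still holds, so a stale bit cannot survive a sender change. Isolated as a sketch (the constant's bit position is an assumption):

const EnoughFeeCapProtocol = 0b10000 // assumed bit position

// updateProtocolFeeBit applies rule 1: the bit survives only when the
// tx's feeCap can ever satisfy the protocol's minimal base fee.
func updateProtocolFeeBit(subPool uint8, feeCap, protocolBaseFee uint64) uint8 {
	subPool &^= EnoughFeeCapProtocol
	if feeCap >= protocolBaseFee {
		subPool |= EnoughFeeCapProtocol
	}
	return subPool
}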
@@ -472,10 +473,10 @@ func onSenderChange(sender *senderInfo, protocolBaseFee, blockBaseFee uint64) {
 		// the sender is M, and there are transactions for all nonces between M and N from the same
 		// sender. Set to 0 if the transaction's nonce is divided from the state nonce by one or more nonce gaps.
 		it.MetaTx.SubPool &^= NoNonceGaps
-		if prevNonce == -1 || uint64(prevNonce)+1 == it.MetaTx.Tx.nonce {
+		if uint64(prevNonce)+1 == it.MetaTx.Tx.nonce {
 			it.MetaTx.SubPool |= NoNonceGaps
-			prevNonce = it.Tx.nonce
 		}
+		prevNonce = int(it.Tx.nonce)
 		// 3. Sufficient balance for gas. Set to 1 if the balance of sender's account in the
 		// state is B, nonce of the sender in the state is M, nonce of the transaction is N, and the
@@ -487,7 +488,6 @@ func onSenderChange(sender *senderInfo, protocolBaseFee, blockBaseFee uint64) {
 		it.MetaTx.SubPool &^= EnoughBalance
 		if sender.balance.Gt(accumulatedSenderSpent) || sender.balance.Eq(accumulatedSenderSpent) {
 			it.MetaTx.SubPool |= EnoughBalance
-			fmt.Printf("a3: %b,%d,%d,%d,%t\n", it.MetaTx.SubPool, accumulatedSenderSpent.Uint64(), sender.balance.Uint64(), needBalance.Uint64(), it.MetaTx.SenderHasEnoughBalance)
 		}
 		// 4. Dynamic fee requirement. Set to 1 if feeCap of the transaction is no less than

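Taken together, the onSenderChange hunks above change the ascending-nonce walk over a sender's txs: prevNonce is now seeded from the sender's state nonce instead of -1, advanced on every tx, and the worst-case spend is accumulated as the walk proceeds. A compact sketch of rules 2 and 3 under those changes, with uint64 standing in for the pool's uint256 arithmetic and txs assumed sorted by nonce (as the btree Ascend guarantees):

type slimTx struct{ nonce, gas, feeCap, value uint64 }

// walkSender marks, for each tx, whether it directly follows the
// previously seen nonce (rule 2) and whether the sender's balance covers
// the accumulated worst-case spend up to and including it (rule 3).
func walkSender(stateNonce, balance uint64, txs []slimTx) (noGaps, enoughBalance []bool) {
	noGaps = make([]bool, len(txs))
	enoughBalance = make([]bool, len(txs))
	prev, spent := stateNonce, uint64(0)
	for i, tx := range txs {
		if prev+1 == tx.nonce {
			noGaps[i] = true
		}
		prev = tx.nonce // advanced unconditionally, as in the new code
		spent += tx.gas*tx.feeCap + tx.value
		enoughBalance[i] = balance >= spent
	}
	return noGaps, enoughBalance
}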
View File

@@ -6,7 +6,6 @@ package txpool
 import (
 	"bytes"
 	"encoding/binary"
-	"fmt"
 	"testing"

 	"github.com/google/btree"
@@ -242,7 +241,7 @@ func FuzzOnNewBlocks10(f *testing.F) {
 		t.Skip()
 	}
 	protocolBaseFee, blockBaseFee := uint64(protocolBaseFee1), uint64(blockBaseFee1)
-	protocolBaseFeeU256, blockBaseFeeU256 := uint256.NewInt(protocolBaseFee), uint256.NewInt(blockBaseFee)
+	//protocolBaseFeeU256, blockBaseFeeU256 := uint256.NewInt(protocolBaseFee), uint256.NewInt(blockBaseFee)
 	if protocolBaseFee == 0 || blockBaseFee == 0 {
 		t.Skip()
 	}
@@ -263,111 +262,116 @@ func FuzzOnNewBlocks10(f *testing.F) {
 	pool := New(ch)
 	pool.senderInfo = senders
 	pool.senderIDs = senderIDs
-	check := func(unwindTxs, minedTxs TxSlots) {
+	check := func(unwindTxs, minedTxs TxSlots, msg string) {
 		pending, baseFee, queued := pool.pending, pool.baseFee, pool.queued
-		if pending.Len() > 0 || baseFee.Len() > 0 || queued.Len() > 0 {
-			fmt.Printf("len: %d,%d,%d\n", pending.Len(), baseFee.Len(), queued.Len())
-		}
+		//if pending.Len() > 0 || baseFee.Len() > 0 || queued.Len() > 0 {
+		//	fmt.Printf("len: %d,%d,%d\n", pending.Len(), baseFee.Len(), queued.Len())
+		//}
 		best, worst := pending.Best(), pending.Worst()
 		assert.LessOrEqual(pending.Len(), PendingSubPoolLimit)
-		assert.False(worst != nil && best == nil)
-		assert.False(worst == nil && best != nil)
+		assert.False(worst != nil && best == nil, msg)
+		assert.False(worst == nil && best != nil, msg)
 		if worst != nil && worst.SubPool < 0b11110 {
 			t.Fatalf("pending worst too small %b", worst.SubPool)
 		}
 		iterateSubPoolUnordered(pending, func(tx *MetaTx) {
 			i := tx.Tx
-			assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce)
+			if tx.SubPool&NoNonceGaps > 0 {
+				assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg)
+			}
 			if tx.SubPool&EnoughBalance > 0 {
 				assert.True(tx.SenderHasEnoughBalance)
 			}
-			need := uint256.NewInt(i.gas)
-			need = need.Mul(need, uint256.NewInt(i.feeCap))
-			need = need.Add(need, &i.value)
-			assert.True(need.Lt(protocolBaseFeeU256) || need.Eq(protocolBaseFeeU256))
-			assert.True(need.Lt(blockBaseFeeU256) || need.Eq(blockBaseFeeU256))
+			if tx.SubPool&EnoughFeeCapProtocol > 0 {
+				assert.LessOrEqual(protocolBaseFee, tx.Tx.feeCap, msg)
+			}
+			if tx.SubPool&EnoughFeeCapBlock > 0 {
+				assert.LessOrEqual(blockBaseFee, tx.Tx.feeCap, msg)
+			}
 			// side data structures must have all txs
-			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}))
+			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}), msg)
 			_, ok = pool.byHash[string(i.idHash[:])]
 			assert.True(ok)
 			// pools can't have more than 1 tx with same SenderID+Nonce
 			iterateSubPoolUnordered(baseFee, func(mtx2 *MetaTx) {
 				tx2 := mtx2.Tx
-				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce)
+				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce, msg)
 			})
 			iterateSubPoolUnordered(queued, func(mtx2 *MetaTx) {
 				tx2 := mtx2.Tx
-				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce)
+				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce, msg)
 			})
 			iterateSubPoolUnordered(pending, func(mtx2 *MetaTx) {
 				tx2 := mtx2.Tx
-				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce)
+				assert.False(tx2.senderID == i.senderID && tx2.nonce == i.nonce, msg)
 			})
 		})
 		best, worst = baseFee.Best(), baseFee.Worst()
-		assert.False(worst != nil && best == nil)
-		assert.False(worst == nil && best != nil)
-		assert.LessOrEqual(baseFee.Len(), BaseFeeSubPoolLimit)
+		assert.False(worst != nil && best == nil, msg)
+		assert.False(worst == nil && best != nil, msg)
+		assert.LessOrEqual(baseFee.Len(), BaseFeeSubPoolLimit, msg)
 		if worst != nil && worst.SubPool < 0b11100 {
 			t.Fatalf("baseFee worst too small %b", worst.SubPool)
 		}
 		iterateSubPoolUnordered(baseFee, func(tx *MetaTx) {
 			i := tx.Tx
-			assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce)
+			if tx.SubPool&NoNonceGaps > 0 {
+				assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg)
+			}
 			if tx.SubPool&EnoughBalance != 0 {
-				assert.True(tx.SenderHasEnoughBalance)
+				assert.True(tx.SenderHasEnoughBalance, msg)
 			}
+			if tx.SubPool&EnoughFeeCapProtocol > 0 {
+				assert.LessOrEqual(protocolBaseFee, tx.Tx.feeCap, msg)
+			}
+			if tx.SubPool&EnoughFeeCapBlock > 0 {
+				assert.LessOrEqual(blockBaseFee, tx.Tx.feeCap, msg)
+			}
-			need := uint256.NewInt(i.gas)
-			need = need.Mul(need, uint256.NewInt(i.feeCap))
-			need = need.Add(need, &i.value)
-			assert.True(need.Lt(protocolBaseFeeU256) || need.Eq(protocolBaseFeeU256))
-			assert.True(need.Lt(blockBaseFeeU256) || need.Eq(blockBaseFeeU256))
-			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}))
+			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}), msg)
 			_, ok = pool.byHash[string(i.idHash[:])]
-			assert.True(ok)
+			assert.True(ok, msg)
 		})
 		best, worst = queued.Best(), queued.Worst()
 		assert.LessOrEqual(queued.Len(), QueuedSubPoolLimit)
-		assert.False(worst != nil && best == nil)
-		assert.False(worst == nil && best != nil)
+		assert.False(worst != nil && best == nil, msg)
+		assert.False(worst == nil && best != nil, msg)
 		if worst != nil && worst.SubPool < 0b10000 {
 			t.Fatalf("queued worst too small %b", worst.SubPool)
 		}
 		iterateSubPoolUnordered(queued, func(tx *MetaTx) {
 			i := tx.Tx
-			assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce)
+			if tx.SubPool&NoNonceGaps > 0 {
+				assert.GreaterOrEqual(i.nonce, senders[i.senderID].nonce, msg)
+			}
 			if tx.SubPool&EnoughBalance > 0 {
-				assert.True(tx.SenderHasEnoughBalance)
+				assert.True(tx.SenderHasEnoughBalance, msg)
 			}
+			if tx.SubPool&EnoughFeeCapProtocol > 0 {
+				assert.LessOrEqual(protocolBaseFee, tx.Tx.feeCap, msg)
+			}
+			if tx.SubPool&EnoughFeeCapBlock > 0 {
+				assert.LessOrEqual(blockBaseFee, tx.Tx.feeCap, msg)
+			}
-			need := uint256.NewInt(i.gas)
-			need = need.Mul(need, uint256.NewInt(i.feeCap))
-			need = need.Add(need, &i.value)
-			assert.True(need.Lt(protocolBaseFeeU256) || need.Eq(protocolBaseFeeU256))
-			assert.True(need.Lt(blockBaseFeeU256) || need.Eq(blockBaseFeeU256))
-			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}))
+			assert.True(senders[i.senderID].txNonce2Tx.Has(&nonce2TxItem{tx}), msg)
 			_, ok = pool.byHash[string(i.idHash[:])]
-			assert.True(ok)
+			assert.True(ok, msg)
 		})
 		// all txs in side data structures must be in some queue
 		for _, txn := range pool.byHash {
-			assert.True(txn.bestIndex >= 0)
-			assert.True(txn.worstIndex >= 0)
+			assert.True(txn.bestIndex >= 0, msg)
+			assert.True(txn.worstIndex >= 0, msg)
 		}
 		for i := range senders {
 			//assert.True(senders[i].txNonce2Tx.Len() > 0)
 			senders[i].txNonce2Tx.Ascend(func(i btree.Item) bool {
 				mt := i.(*nonce2TxItem).MetaTx
-				assert.True(mt.worstIndex >= 0)
-				assert.True(mt.bestIndex >= 0)
+				assert.True(mt.worstIndex >= 0, msg)
+				assert.True(mt.bestIndex >= 0, msg)
 				return true
 			})
 		}
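The 0b11110 / 0b11100 / 0b10000 thresholds in check decode against the five SubPool bits, highest first. The exact values below are an assumption consistent with these checks and with the rule numbering in the pool code: pending demands the top four bits, baseFee the top three, queued only the top one.

const (
	EnoughFeeCapProtocol = 0b10000 // rule 1: feeCap >= protocol's minimal base fee
	NoNonceGaps          = 0b01000 // rule 2: no gap from the state nonce
	EnoughBalance        = 0b00100 // rule 3: covers accumulated worst-case spend
	EnoughFeeCapBlock    = 0b00010 // rule 4: feeCap >= current block base fee
	IsLocal              = 0b00001 // tx came from a local sender
)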
@@ -375,11 +379,11 @@ func FuzzOnNewBlocks10(f *testing.F) {
 		// mined txs must be removed
 		for i := range minedTxs.txs {
 			_, ok = pool.byHash[string(minedTxs.txs[i].idHash[:])]
-			assert.False(ok)
+			assert.False(ok, msg)
 		}
 	}
-	checkNotify := func(unwindTxs, minedTxs TxSlots) {
+	checkNotify := func(unwindTxs, minedTxs TxSlots, msg string) {
 		select {
 		case newHashes := <-ch:
 			//assert.Equal(len(unwindTxs.txs), newHashes.Len())
@@ -400,8 +404,8 @@ func FuzzOnNewBlocks10(f *testing.F) {
 					break
 				}
 			}
-			assert.True(foundInUnwind)
-			assert.False(foundInMined)
+			assert.True(foundInUnwind, msg)
+			assert.False(foundInMined, msg)
 		}
 	default:
@@ -413,20 +417,20 @@ func FuzzOnNewBlocks10(f *testing.F) {
 	unwindTxs, minedTxs1, p2pReceived, minedTxs2 := splitDataset(txs)
 	err = pool.OnNewBlock(unwindTxs, minedTxs1, protocolBaseFee, blockBaseFee)
 	assert.NoError(err)
-	check(unwindTxs, minedTxs1)
-	checkNotify(unwindTxs, minedTxs1)
+	check(unwindTxs, minedTxs1, "fork1")
+	checkNotify(unwindTxs, minedTxs1, "fork1")

 	// unwind everything and switch to new fork (need to unwind mined txs now)
 	err = pool.OnNewBlock(minedTxs1, minedTxs2, protocolBaseFee, blockBaseFee)
 	assert.NoError(err)
-	check(minedTxs1, minedTxs2)
-	checkNotify(minedTxs1, minedTxs2)
+	check(minedTxs1, minedTxs2, "fork2")
+	checkNotify(minedTxs1, minedTxs2, "fork2")

 	// add some remote txs from p2p
 	err = pool.OnNewTxs(p2pReceived)
 	assert.NoError(err)
-	check(p2pReceived, TxSlots{})
-	checkNotify(p2pReceived, TxSlots{})
+	check(p2pReceived, TxSlots{}, "p2pmsg1")
+	checkNotify(p2pReceived, TxSlots{}, "p2pmsg1")
 })
}