Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git (synced 2024-12-22 19:50:36 +00:00)
Commit d8b91c4d02
When the sync loop first runs, it suppresses block sync events both on the initial iteration and when more than 1000 blocks are being processed. This fix removes the first check, because otherwise the first block received by the process never reaches the tx pool, which means no new blocks are produced for Polygon. Alongside this fix, the gas initialization has been moved into the txpool's start method rather than being prompted by a 'synthetic block event'. Since the txpool's start method has access to the core and txpool DBs, it can look up the current block and chain config internally; it no longer needs to be activated externally and can do this itself on start-up. This has the advantage of making the txpool more self-contained.
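A minimal sketch of the start-up pattern described above, under stated assumptions: the names Pool, headReader, Start, and fixedHead are hypothetical illustrations, not Erigon's actual API. The point is that the pool seeds its own gas/base-fee state from the chain head when it starts, instead of waiting for an external synthetic block event.

package main

import (
	"errors"
	"fmt"
)

// headReader abstracts read access to the current chain head; in Erigon this
// role would be played by the core and txpool DBs. Hypothetical interface.
type headReader interface {
	CurrentBaseFee() (uint64, error)
}

// Pool is a toy stand-in for the tx pool.
type Pool struct {
	pendingBaseFee uint64
	started        bool
}

// Start seeds the pool's gas state from the current head on its own,
// instead of waiting for a synthetic block event from the sync loop.
func (p *Pool) Start(r headReader) error {
	if p.started {
		return errors.New("pool already started")
	}
	baseFee, err := r.CurrentBaseFee()
	if err != nil {
		return fmt.Errorf("seeding base fee from chain head: %w", err)
	}
	p.pendingBaseFee = baseFee
	p.started = true
	return nil
}

// fixedHead is a stub chain head for the example.
type fixedHead struct{ baseFee uint64 }

func (h fixedHead) CurrentBaseFee() (uint64, error) { return h.baseFee, nil }

func main() {
	p := &Pool{}
	if err := p.Start(fixedHead{baseFee: 1_000_000_000}); err != nil {
		panic(err)
	}
	fmt.Println("pool started with pending base fee:", p.pendingBaseFee)
}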
160 lines
4.6 KiB
Go
/*
   Copyright 2021 The Erigon contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package txpooluitl

import (
	"context"
	"fmt"
	"math/big"
	"time"

	"github.com/c2h5oh/datasize"
	"github.com/holiman/uint256"
	"github.com/ledgerwatch/log/v3"

	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon-lib/direct"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
	"github.com/ledgerwatch/erigon-lib/txpool"
	"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
	"github.com/ledgerwatch/erigon-lib/types"
)
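
// SaveChainConfigIfNeed returns the chain config and the last seen block
// number for the tx pool. It prefers the copy cached in the pool's own DB;
// when that is missing, or force is set, it re-reads both from the core DB
// (retrying until the core DB is readable) and caches the result back.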
func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB, force bool, logger log.Logger) (cc *chain.Config, blockNum uint64, err error) {
	if err = txPoolDB.View(ctx, func(tx kv.Tx) error {
		cc, err = txpool.ChainConfig(tx)
		if err != nil {
			return err
		}
		blockNum, err = txpool.LastSeenBlock(tx)
		if err != nil {
			return err
		}
		return nil
	}); err != nil {
		return nil, 0, err
	}
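	// A cached config is trusted as-is unless the caller forces a refresh.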
	if cc != nil && !force {
		if cc.ChainID.Uint64() == 0 {
			return nil, 0, fmt.Errorf("wrong chain config")
		}
		return cc, blockNum, nil
	}
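
	// Otherwise read the config and head block from the core DB, retrying
	// until it becomes readable.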
	for {
		if err = coreDB.View(ctx, func(tx kv.Tx) error {
			cc, err = chain.GetConfig(tx, nil)
			if err != nil {
				return err
			}
			n, err := chain.CurrentBlockNumber(tx)
			if err != nil {
				return err
			}
			if n != nil {
				blockNum = *n
			}
			return nil
		}); err != nil {
			logger.Error("cant read chain config from core db", "err", err)
			time.Sleep(5 * time.Second)
			continue
		} else if cc == nil {
			logger.Error("cant read chain config from core db")
			time.Sleep(5 * time.Second)
			continue
		}
		break
	}
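
	// Cache the freshly read config and head block in the pool's own DB.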
	if err = txPoolDB.Update(ctx, func(tx kv.RwTx) error {
		if err = txpool.PutChainConfig(tx, cc, nil); err != nil {
			return err
		}
		if err = txpool.PutLastSeenBlock(tx, blockNum, nil); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return nil, 0, err
	}
	if cc.ChainID.Uint64() == 0 {
		return nil, 0, fmt.Errorf("wrong chain config")
	}
	return cc, blockNum, nil
}
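
// AllComponents wires up everything the tx pool needs: its own MDBX database,
// the pool itself, the fetch and send loops, and the pool's gRPC server.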
func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB,
	sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, feeCalculator txpool.FeeCalculator, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) {
	opts := mdbx.NewMDBX(logger).Label(kv.TxPoolDB).Path(cfg.DBDir).
		WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).
		WriteMergeThreshold(3 * 8192).
		PageSize(uint64(16 * datasize.KB)).
		GrowthStep(16 * datasize.MB).
		DirtySpace(uint64(128 * datasize.MB)).
		MapSize(1 * datasize.TB)
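
	// User-supplied limits from the config override the defaults above.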
	if cfg.MdbxPageSize.Bytes() > 0 {
		opts = opts.PageSize(cfg.MdbxPageSize.Bytes())
	}
	if cfg.MdbxDBSizeLimit > 0 {
		opts = opts.MapSize(cfg.MdbxDBSizeLimit)
	}
	if cfg.MdbxGrowthStep > 0 {
		opts = opts.GrowthStep(cfg.MdbxGrowthStep)
	}

	txPoolDB, err := opts.Open(ctx)
	if err != nil {
		return nil, nil, nil, nil, nil, err
	}

	chainConfig, _, err := SaveChainConfigIfNeed(ctx, chainDB, txPoolDB, true, logger)
	if err != nil {
		return nil, nil, nil, nil, nil, err
	}
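
	// Derive the fork-activation parameters the pool needs for tx validation.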
	chainID, _ := uint256.FromBig(chainConfig.ChainID)
	maxBlobsPerBlock := chainConfig.GetMaxBlobsPerBlock()

	shanghaiTime := chainConfig.ShanghaiTime
	var agraBlock *big.Int
	if chainConfig.Bor != nil {
		agraBlock = chainConfig.Bor.GetAgraBlock()
	}
	cancunTime := chainConfig.CancunTime
	if cfg.OverrideCancunTime != nil {
		cancunTime = cfg.OverrideCancunTime
	}

	txPool, err := txpool.New(newTxs, chainDB, cfg, cache, *chainID, shanghaiTime, agraBlock, cancunTime, maxBlobsPerBlock, feeCalculator, logger)
	if err != nil {
		return nil, nil, nil, nil, nil, err
	}

	fetch := txpool.NewFetch(ctx, sentryClients, txPool, stateChangesClient, chainDB, txPoolDB, *chainID, logger)
	//fetch.ConnectCore()
	//fetch.ConnectSentries()

	send := txpool.NewSend(ctx, sentryClients, txPool, logger)
	txpoolGrpcServer := txpool.NewGrpcServer(ctx, txPool, txPoolDB, *chainID, logger)
	return txPoolDB, txPool, fetch, send, txpoolGrpcServer, nil
}