fix integration tests run (#7672)

This commit is contained in:
Alex Sharov 2023-06-06 13:49:01 +07:00 committed by GitHub
parent 6c129e5f1d
commit bf9f5067f3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 24 additions and 21 deletions

View File

@ -29,8 +29,8 @@ jobs:
- name: test-integration
run: make test-integration
- name: history-v3-test-integration
run: make test3-integration
# - name: history-v3-test-integration
# run: make test3-integration
tests-windows:
strategy:

View File

@ -28,7 +28,8 @@ CGO_CFLAGS += -Wno-error=strict-prototypes # for Clang15, remove it when can htt
CGO_CFLAGS := CGO_CFLAGS="$(CGO_CFLAGS)"
DBG_CGO_CFLAGS += -DMDBX_DEBUG=1
BUILD_TAGS = nosqlite,noboltdb # about netgo see: https://github.com/golang/go/issues/30310#issuecomment-471669125 and https://github.com/golang/go/issues/57757
# about netgo see: https://github.com/golang/go/issues/30310#issuecomment-471669125 and https://github.com/golang/go/issues/57757
BUILD_TAGS = nosqlite,noboltdb
PACKAGE = github.com/ledgerwatch/erigon
GO_FLAGS += -trimpath -tags $(BUILD_TAGS) -buildvcs=false

View File

@ -57,7 +57,7 @@ func NewStagedSync(
ExecutionStages(ctx, cfg.Prune,
stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg),
stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator),
stagedsync.StageCumulativeIndexCfg(db),
stagedsync.StageCumulativeIndexCfg(db, blockReader),
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockRetire, blockWriter, blockReader, controlServer.Hd),

View File

@ -115,7 +115,7 @@ func readTransactionByHash(db kv.Tx, hash libcommon.Hash, br services.FullBlockR
if blockNumber == nil {
return nil, libcommon.Hash{}, 0, 0, nil
}
blockHash, err := rawdb.ReadCanonicalHash(db, *blockNumber)
blockHash, err := br.CanonicalHash(context.Background(), db, *blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}

View File

@ -10,6 +10,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/common/dbutils"
@ -20,12 +21,14 @@ import (
)
type CumulativeIndexCfg struct {
db kv.RwDB
db kv.RwDB
blockReader services.FullBlockReader
}
func StageCumulativeIndexCfg(db kv.RwDB) CumulativeIndexCfg {
func StageCumulativeIndexCfg(db kv.RwDB, blockReader services.FullBlockReader) CumulativeIndexCfg {
return CumulativeIndexCfg{
db: db,
db: db,
blockReader: blockReader,
}
}
@ -79,7 +82,7 @@ func SpawnStageCumulativeIndex(cfg CumulativeIndexCfg, s *StageState, tx kv.RwTx
return err
}
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockNumber)
canonicalHash, err := cfg.blockReader.CanonicalHash(ctx, tx, blockNumber)
if err != nil {
return err
}

View File

@ -154,7 +154,7 @@ func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, finishS
}
notifyTo = binary.BigEndian.Uint64(k)
var err error
if notifyToHash, err = rawdb.ReadCanonicalHash(tx, notifyTo); err != nil {
if notifyToHash, err = blockReader.CanonicalHash(ctx, tx, notifyTo); err != nil {
logger.Warn("[Finish] failed checking if header is cannonical")
}

View File

@ -61,7 +61,7 @@ type EthBackendServer struct {
eth EthBackend
events *shards.Events
db kv.RoDB
blockReader services.BlockAndTxnReader
blockReader services.FullBlockReader
config *chain.Config
// Block proposing for proof-of-stake
@ -85,7 +85,7 @@ type EthBackend interface {
Peers(ctx context.Context) (*remote.PeersReply, error)
}
func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *shards.Events, blockReader services.BlockAndTxnReader,
func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *shards.Events, blockReader services.FullBlockReader,
config *chain.Config, builderFunc builder.BlockBuilderFunc, hd *headerdownload.HeaderDownload, proposing bool, logger log.Logger,
) *EthBackendServer {
s := &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader, config: config,
@ -463,7 +463,7 @@ func (s *EthBackendServer) getQuickPayloadStatusIfPossible(blockHash libcommon.H
var canonicalHash libcommon.Hash
if header != nil {
canonicalHash, err = rawdb.ReadCanonicalHash(tx, header.Number.Uint64())
canonicalHash, err = s.blockReader.CanonicalHash(context.Background(), tx, header.Number.Uint64())
}
if err != nil {
return nil, err

View File

@ -27,12 +27,11 @@ import (
"testing"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/hexutil"
@ -115,6 +114,7 @@ func (bt *BlockTest) Run(t *testing.T, _ bool) error {
engine := ethconsensusconfig.CreateConsensusEngineBareBones(config, log.New())
m := stages.MockWithGenesisEngine(t, bt.genesis(config), engine, false)
bt.br, _ = m.NewBlocksIO()
// import pre accounts & construct test genesis block & state root
if m.Genesis.Hash() != bt.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", m.Genesis.Hash().Bytes()[:6], bt.json.Genesis.Hash[:6])

View File

@ -27,9 +27,9 @@ import (
"runtime"
"testing"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/tracers/logger"
"github.com/ledgerwatch/erigon/turbo/stages"
"github.com/ledgerwatch/log/v3"
)
@ -51,8 +51,7 @@ func TestState(t *testing.T) {
st.skipLoad(`.*vmPerformance/loop.*`)
st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) {
m := stages.Mock(t)
db := m.DB
db := memdb.NewTestDB(t)
for _, subtest := range test.Subtests() {
subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)

View File

@ -531,7 +531,7 @@ func readReceipt(db kv.Tx, txHash libcommon.Hash, br services.FullBlockReader) (
if blockNumber == nil {
return nil, libcommon.Hash{}, 0, 0, nil
}
blockHash, err := rawdb.ReadCanonicalHash(db, *blockNumber)
blockHash, err := br.CanonicalHash(context.Background(), db, *blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}

View File

@ -372,7 +372,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
stagedsync.DefaultStages(mock.Ctx,
stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, dirs, blockRetire, snapshotsDownloader, blockReader, mock.Notifications.Events, mock.HistoryV3, mock.agg),
stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, mock.Notifications, engineapi.NewForkValidatorMock(1)),
stagedsync.StageCumulativeIndexCfg(mock.DB),
stagedsync.StageCumulativeIndexCfg(mock.DB, blockReader),
stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter),
stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, blockReader, cfg.HistoryV3, blockWriter),
stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, blockRetire, blockWriter, blockReader, mock.sentriesClient.Hd),

View File

@ -415,7 +415,7 @@ func NewDefaultStages(ctx context.Context,
return stagedsync.DefaultStages(ctx,
stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg),
stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator),
stagedsync.StageCumulativeIndexCfg(db),
stagedsync.StageCumulativeIndexCfg(db, blockReader),
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockRetire, blockWriter, blockReader, controlServer.Hd),