// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
	"bytes"
	crand "crypto/rand"
	"encoding/json"
	"errors"
	"math"
	"math/big"
	"math/rand"
	"net/http"
	"runtime"
	"sync"
	"time"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/common/hexutil"
	"github.com/ledgerwatch/turbo-geth/consensus"
	"github.com/ledgerwatch/turbo-geth/core/types"
	"github.com/ledgerwatch/turbo-geth/log"
)

const (
	// staleThreshold is the maximum depth of the acceptable stale but valid ethash solution.
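	//
	// In submitWork below, a solution for block N is therefore still forwarded to the
	// results channel while the current work is at block N+staleThreshold-1 or lower;
	// anything older is dropped as stale.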
	staleThreshold = 7
)

var (
	errNoMiningWork      = errors.New("no mining work available yet")
	errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)

// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
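// Sealing takes one of three paths: in fake PoW mode a zero nonce is returned
// immediately, a shared PoW instance delegates the work to that instance, and
// otherwise local mining threads are spawned while the block is also pushed to
// the remote sealer (if one is listening on workCh).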
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
	// If we're running a fake PoW, simply return a 0 nonce immediately
	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
		header := block.Header()
		header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
		select {
		case results <- block.WithSeal(header):
		default:
			log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
		}
		return nil
	}
	// If we're running a shared PoW, delegate sealing to it
	if ethash.shared != nil {
		return ethash.shared.Seal(chain, block, results, stop)
	}
	// Create a runner and the multiple search threads it directs
	abort := make(chan struct{})

	ethash.lock.Lock()
	threads := ethash.threads
	if ethash.rand == nil {
		seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
		if err != nil {
			ethash.lock.Unlock()
			return err
		}
		ethash.rand = rand.New(rand.NewSource(seed.Int64()))
	}
	ethash.lock.Unlock()
	if threads == 0 {
		threads = runtime.NumCPU()
	}
	if threads < 0 {
		threads = 0 // Allows disabling local mining without extra logic around local/remote
	}
	// Push new work to remote sealer
	if ethash.workCh != nil {
		ethash.workCh <- &sealTask{block: block, results: results}
	}
	var (
		pend   sync.WaitGroup
		locals = make(chan *types.Block)
	)
	for i := 0; i < threads; i++ {
		pend.Add(1)
		go func(id int, nonce uint64) {
			defer pend.Done()
			ethash.mine(block, id, nonce, abort, locals)
		}(i, uint64(ethash.rand.Int63()))
	}
	// Wait until sealing is terminated or a nonce is found
	go func() {
		var result *types.Block
		select {
		case <-stop:
			// Outside abort, stop all miner threads
			close(abort)
		case result = <-locals:
			// One of the threads found a block, abort all others
			select {
			case results <- result:
			default:
				log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
			}
			close(abort)
		case <-ethash.update:
			// Thread count was changed on user request, restart
			close(abort)
			if err := ethash.Seal(chain, block, results, stop); err != nil {
				log.Error("Failed to restart sealing after update", "err", err)
			}
		}
		// Wait for all miners to terminate and return the block
		pend.Wait()
	}()
	return nil
}

// mine is the actual proof-of-work miner that searches for a nonce, starting from
// seed, that results in a correct final block difficulty.
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
	// Extract some data from the header
	var (
		header  = block.Header()
		hash    = ethash.SealHash(header).Bytes()
		target  = new(big.Int).Div(two256, header.Difficulty)
		number  = header.Number.Uint64()
		dataset = ethash.dataset(number, false)
	)
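	// A nonce is accepted when hashimotoFull's result below, interpreted as a
	// big-endian 256-bit integer, is at most target, i.e. 2^256 / difficulty.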
	// Start generating random nonces until we abort or find a good one
	var (
		attempts = int64(0)
		nonce    = seed
	)
	logger := log.New("miner", id)
	logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
	for {
		select {
		case <-abort:
			// Mining terminated, update stats and abort
			logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
			ethash.hashrate.Mark(attempts)
			break search

		default:
			// We don't have to update hash rate on every nonce, so update after 2^X nonces
			attempts++
			if (attempts % (1 << 15)) == 0 {
				ethash.hashrate.Mark(attempts)
				attempts = 0
			}
			// Compute the PoW value of this nonce
			digest, result := hashimotoFull(dataset.dataset, hash, nonce)
			if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
				// Correct nonce found, create a new header with it
				header = types.CopyHeader(header)
				header.Nonce = types.EncodeNonce(nonce)
				header.MixDigest = common.BytesToHash(digest)

				// Seal and return a block (if still needed)
				select {
				case found <- block.WithSeal(header):
					logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
				case <-abort:
					logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
				}
				break search
			}
			nonce++
		}
	}
	// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
	// during sealing so it's not unmapped while being read.
	runtime.KeepAlive(dataset)
}

// remote is a standalone goroutine that handles remote mining: it pushes new work
// packages to external sealers, serves their work requests, verifies submitted
// solutions and tracks the hash rates they report.
func (ethash *Ethash) remote(notify []string, noverify bool) {
	var (
		works = make(map[common.Hash]*types.Block)
		rates = make(map[common.Hash]hashrate)

		results      chan<- *types.Block
		currentBlock *types.Block
		currentWork  [4]string

		notifyTransport = &http.Transport{}
		notifyClient    = &http.Client{
			Transport: notifyTransport,
			Timeout:   time.Second,
		}
		notifyReqs = make([]*http.Request, len(notify))
	)
	// notifyWork notifies all the specified mining endpoints of the availability of
	// new work to be processed.
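	//
	// Each endpoint receives an HTTP POST whose body is the JSON encoding of the
	// current work package (see makeWork below).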
	notifyWork := func() {
		work := currentWork
		blob, _ := json.Marshal(work)

		for i, url := range notify {
			// Terminate any previously pending request and create the new work
			if notifyReqs[i] != nil {
				notifyTransport.CancelRequest(notifyReqs[i])
			}
			notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
			notifyReqs[i].Header.Set("Content-Type", "application/json")

			// Push the new work concurrently to all the remote nodes
			go func(req *http.Request, url string) {
				res, err := notifyClient.Do(req)
				if err != nil {
					log.Warn("Failed to notify remote miner", "err", err)
				} else {
					log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
					res.Body.Close()
				}
			}(notifyReqs[i], url)
		}
	}
	// makeWork creates a work package for the external miner.
	//
	// The work package consists of 4 strings:
	//   result[0], 32 bytes hex encoded current block header pow-hash
	//   result[1], 32 bytes hex encoded seed hash used for DAG
	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
	//   result[3], hex encoded block number
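	//
	// result[0] doubles as the lookup key for the pending block when a solution is
	// submitted; result[3] is, for example, "0x4c4b40" for block 5,000,000.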
	makeWork := func(block *types.Block) {
		hash := ethash.SealHash(block.Header())

		currentWork[0] = hash.Hex()
		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
		currentWork[3] = hexutil.EncodeBig(block.Number())

		// Trace the seal work fetched by remote sealer.
		currentBlock = block
		works[hash] = block
	}
	// submitWork verifies the submitted pow solution, returning whether the solution
	// was accepted or not (a rejection can mean either an invalid pow or any other
	// error, such as no pending work or a stale mining result).
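	//
	// The sealhash argument is the SealHash of the pending block and is used to look
	// the block up in the works map.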
	submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
		if currentBlock == nil {
			log.Error("Pending work without block", "sealhash", sealhash)
			return false
		}
		// Make sure the work submitted is present
		block := works[sealhash]
		if block == nil {
			log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
			return false
		}
		// Verify the correctness of submitted result.
		header := block.Header()
		header.Nonce = nonce
		header.MixDigest = mixDigest

		start := time.Now()
		if !noverify {
			if err := ethash.verifySeal(nil, header, true); err != nil {
				log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
				return false
			}
		}
		// Make sure the result channel is assigned.
		if results == nil {
			log.Warn("Ethash result channel is empty, submitted mining result is rejected")
			return false
		}
		log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))

		// Solution seems to be valid, return to the miner and notify acceptance.
		solution := block.WithSeal(header)

		// The submitted solution is within the scope of acceptance.
		if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
			select {
			case results <- solution:
				log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
				return true
			default:
				log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
				return false
			}
		}
		// The submitted block is too old to accept, drop it.
		log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
		return false
	}

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case work := <-ethash.workCh:
			// Update current work with new received block.
			// Note that the same work can be passed in twice, which happens when changing CPU threads.
			results = work.results

			makeWork(work.block)

			// Notify all the requested URLs of the new work availability
			notifyWork()

		case work := <-ethash.fetchWorkCh:
			// Return current mining work to remote miner.
			if currentBlock == nil {
				work.errc <- errNoMiningWork
			} else {
				work.res <- currentWork
			}

		case result := <-ethash.submitWorkCh:
			// Verify submitted PoW solution based on maintained mining blocks.
			if submitWork(result.nonce, result.mixDigest, result.hash) {
				result.errc <- nil
			} else {
				result.errc <- errInvalidSealResult
			}

		case result := <-ethash.submitRateCh:
			// Trace remote sealer's hash rate by submitted value.
			rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
			close(result.done)

		case req := <-ethash.fetchRateCh:
			// Gather all the hash rates submitted by remote sealers.
			var total uint64
			for _, rate := range rates {
				// this could overflow
				total += rate.rate
			}
			req <- total

		case <-ticker.C:
			// Clear stale submitted hash rate.
			for id, rate := range rates {
				if time.Since(rate.ping) > 10*time.Second {
					delete(rates, id)
				}
			}
			// Clear stale pending blocks
			if currentBlock != nil {
				for hash, block := range works {
					if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
						delete(works, hash)
					}
				}
			}

		case errc := <-ethash.exitCh:
			// Exit remote loop if ethash is closed and return relevant error.
			errc <- nil
			log.Trace("Ethash remote sealer is exiting")
			return
		}
	}
}