Mirror of https://gitlab.com/pulsechaincom/go-pulse.git (synced 2024-12-24 20:37:17 +00:00)
Merge pull request #3718 from karalabe/terminal-format-hash
common, eth/downloader, log: support terminal log formatting
This commit is contained in: commit dc0a006c7c
@@ -55,6 +55,24 @@ func (h Hash) Bytes() []byte { return h[:] }
 func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
 func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
 
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (h Hash) TerminalString() string {
+	return fmt.Sprintf("%x…%x", h[:3], h[29:])
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (h Hash) String() string {
+	return h.Hex()
+}
+
+// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
+// without going through the stringer interface used for logging.
+func (h Hash) Format(s fmt.State, c rune) {
+	fmt.Fprintf(s, "%"+string(c), h[:])
+}
+
 // UnmarshalJSON parses a hash in its hex from to a hash.
 func (h *Hash) UnmarshalJSON(input []byte) error {
 	return hexutil.UnmarshalJSON("Hash", input, h[:])
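To make the effect of the new Hash methods concrete, here is a minimal, self-contained sketch. The Hash type below is a stand-in for the one in the hunk above, the sample value is made up, and only the relevant methods are reproduced:

```go
package main

import "fmt"

// Hash stands in for the 32-byte hash type from the hunk above.
type Hash [32]byte

// Hex mirrors the "0x"-prefixed full hex encoding.
func (h Hash) Hex() string { return fmt.Sprintf("0x%x", h[:]) }

// TerminalString keeps the first three and last three bytes around an ellipsis.
func (h Hash) TerminalString() string { return fmt.Sprintf("%x…%x", h[:3], h[29:]) }

// String is what %v formatting and full file logging fall back to.
func (h Hash) String() string { return h.Hex() }

func main() {
	var h Hash
	for i := range h {
		h[i] = byte(i) // illustrative value: 0x00, 0x01, ..., 0x1f
	}
	fmt.Println(h.TerminalString()) // 000102…1d1e1f
	fmt.Println(h.String())         // 0x000102... (all 32 bytes, 64 hex chars)
}
```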
@@ -383,7 +383,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 		return errTooOld
 	}
 
-	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash.Hex()[2:10], "td", td, "mode", d.mode)
+	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
 	defer func(start time.Time) {
 		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
 	}(time.Now())
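This call-site change repeats throughout the downloader and queue hunks below. The old `hash.Hex()[2:10]` idiom stripped the `0x` prefix and kept the first four bytes of the hash; after this commit the full hash value is passed and shortening is left to the terminal formatter. A small sketch (stand-in Hash type, illustrative value) of what the removed idiom produced:

```go
package main

import "fmt"

// Hash is a stand-in for the hash type used above; the value is illustrative.
type Hash [32]byte

func (h Hash) Hex() string { return fmt.Sprintf("0x%x", h[:]) }

func main() {
	var head Hash
	copy(head[:], []byte{0xde, 0xad, 0xbe, 0xef, 0x01, 0x02})
	// Hex() is "0x"-prefixed, so [2:10] is exactly the first four bytes.
	fmt.Println(head.Hex()[2:10]) // deadbeef
}
```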
@@ -544,7 +544,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 				return nil, errBadPeer
 			}
 			head := headers[0]
-			p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash().Hex()[2:10])
+			p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
 			return head, nil
 
 		case <-timeout:
@@ -657,10 +657,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	// If the head fetch already found an ancestor, return
 	if !common.EmptyHash(hash) {
 		if int64(number) <= floor {
-			p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash.Hex()[2:10], "allowance", floor)
+			p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
 			return 0, errInvalidAncestor
 		}
-		p.logger.Debug("Found common ancestor", "number", number, "hash", hash.Hex()[2:10])
+		p.logger.Debug("Found common ancestor", "number", number, "hash", hash)
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
@@ -704,7 +704,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			}
 			header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
 			if header.Number.Uint64() != check {
-				p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash().Hex()[2:10], "request", check)
+				p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
 				return 0, errBadPeer
 			}
 			start = check
@@ -722,10 +722,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	}
 	// Ensure valid ancestry and return
 	if int64(start) <= floor {
-		p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash.Hex()[2:10], "allowance", floor)
+		p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
 		return 0, errInvalidAncestor
 	}
-	p.logger.Debug("Found common ancestor", "number", start, "hash", hash.Hex()[2:10])
+	p.logger.Debug("Found common ancestor", "number", start, "hash", hash)
 	return start, nil
 }
 
@@ -1208,7 +1208,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			if atomic.LoadUint32(&d.fsPivotFails) == 0 {
 				for _, header := range rollback {
 					if header.Number.Uint64() == pivot {
-						log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash().Hex()[2:10])
+						log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash())
 						d.fsPivotLock = header
 					}
 				}
@@ -1304,7 +1304,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if n > 0 {
 					rollback = append(rollback, chunk[:n]...)
 				}
-				log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "err", err)
+				log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
 				return errInvalidChain
 			}
 			// All verifications passed, store newly found uncertain headers
@@ -1316,7 +1316,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
 			if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
 				if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-					log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash().Hex()[2:10], "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash().Hex()[2:10])
+					log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash(), "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash())
 					return errInvalidChain
 				}
 			}
@@ -1366,13 +1366,9 @@ func (d *Downloader) processContent() error {
 	// Actually import the blocks
 	first, last := results[0].Header, results[len(results)-1].Header
 	log.Debug("Inserting downloaded chain", "items", len(results),
-		"from", log.Lazy{Fn: func() string {
-			return fmt.Sprintf("#%d [%x…]", first.Number, first.Hash().Bytes()[:4])
-		}},
-		"till", log.Lazy{Fn: func() string {
-			return fmt.Sprintf("#%d [%x…]", last.Number, last.Hash().Bytes()[:4])
-		}})
+		"firstnum", first.Number, "firsthash", first.Hash(),
+		"lastnum", last.Number, "lasthash", last.Hash(),
+	)
 
 	for len(results) != 0 {
 		// Check for any termination requests
 		select {
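The hunk above also drops the log.Lazy closures that pre-formatted shortened block ranges at the call site. A rough sketch of the trade-off, using a simplified Lazy stand-in rather than the real log package: lazy values defer the Sprintf until a record is actually rendered, but once hashes are shortened by the formatter itself, plain key/value pairs are simpler and keep file logs machine-parseable.

```go
package main

import "fmt"

// Lazy is a simplified stand-in for log.Lazy: the formatter calls Fn only when
// the record is rendered.
type Lazy struct{ Fn func() string }

// render mimics a formatter walking key/value pairs.
func render(pairs ...interface{}) string {
	out := ""
	for i := 0; i+1 < len(pairs); i += 2 {
		v := pairs[i+1]
		if l, ok := v.(Lazy); ok {
			v = l.Fn() // evaluated lazily at format time
		}
		out += fmt.Sprintf("%v=%v ", pairs[i], v)
	}
	return out
}

func main() {
	first, last := 1280000, 1280191 // illustrative block numbers
	// Old style: the call site pre-formats a shortened string inside a closure.
	fmt.Println(render("from", Lazy{Fn: func() string { return fmt.Sprintf("#%d [abcd1234…]", first) }}))
	// New style: plain pairs; shortening is the terminal formatter's concern.
	fmt.Println(render("firstnum", first, "lastnum", last))
}
```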
@@ -1406,14 +1402,14 @@ func (d *Downloader) processContent() error {
 		case len(receipts) > 0:
 			index, err = d.insertReceipts(blocks, receipts)
 			if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-				log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash().Hex()[2:10])
+				log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash())
 				index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
 			}
 		default:
 			index, err = d.insertBlocks(blocks)
 		}
 		if err != nil {
-			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "err", err)
+			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
 			return errInvalidChain
 		}
 		// Shift the results to the next batch
@@ -364,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		// Make sure chain order is honoured and preserved throughout
 		hash := header.Hash()
 		if header.Number == nil || header.Number.Uint64() != from {
-			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash.Hex()[2:10], "expected", from)
+			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
 			break
 		}
 		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
-			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
 			break
 		}
 		// Make sure no duplicate requests are executed
 		if _, ok := q.blockTaskPool[hash]; ok {
-			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
 			continue
 		}
 		if _, ok := q.receiptTaskPool[hash]; ok {
-			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
 			continue
 		}
 		// Queue the header for content retrieval
@@ -391,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		}
 		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
 			// Pivoting point of the fast sync, switch the state retrieval to this
-			log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash)
 
 			q.stateTaskIndex = 0
 			q.stateTaskPool = make(map[common.Hash]int)
@@ -872,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	accepted := len(headers) == MaxHeaderFetch
 	if accepted {
 		if headers[0].Number.Uint64() != request.From {
-			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash().Hex()[2:10], request.From)
+			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), request.From)
 			accepted = false
 		} else if headers[len(headers)-1].Hash() != target {
-			log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash().Hex()[2:10], "expected", target.Hex()[2:10])
+			log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
 			accepted = false
 		}
 	}
@@ -883,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 		for i, header := range headers[1:] {
 			hash := header.Hash()
 			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
-				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10], "expected", want)
+				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
 				accepted = false
 				break
 			}
 			if headers[i].Hash() != header.ParentHash {
-				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10])
+				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
 				accepted = false
 				break
 			}
@@ -59,6 +59,13 @@ func (f formatFunc) Format(r *Record) []byte {
 	return f(r)
 }
 
+// TerminalStringer is an analogous interface to the stdlib stringer, allowing
+// own types to have custom shortened serialization formats when printed to the
+// screen.
+type TerminalStringer interface {
+	TerminalString() string
+}
+
 // TerminalFormat formats log records optimized for human readability on
 // a terminal with color-coded level output and terser human friendly timestamp.
 // This format should only be used for interactive programs or while developing.
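Any type can opt into the shortened terminal form by implementing this interface. A brief sketch with a hypothetical PeerID type that is not part of the diff; only the interface itself comes from the hunk above:

```go
package main

import "fmt"

// TerminalStringer matches the interface added in the log package above.
type TerminalStringer interface {
	TerminalString() string
}

// PeerID is a hypothetical example type whose terminal rendering is shortened.
type PeerID string

func (p PeerID) TerminalString() string {
	if len(p) <= 8 {
		return string(p)
	}
	return string(p[:8]) + "…"
}

func main() {
	var v interface{} = PeerID("16Uiu2HAm9Qx4abcdef") // illustrative value
	// This is the kind of type assertion the formatter performs for terminal output.
	if s, ok := v.(TerminalStringer); ok {
		fmt.Println(s.TerminalString()) // 16Uiu2HA…
	}
}
```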
@@ -124,7 +131,7 @@ func TerminalFormat(usecolor bool) Format {
 		}
 
 		// print the keys logfmt style
-		logfmt(b, r.Ctx, color)
+		logfmt(b, r.Ctx, color, true)
 		return b.Bytes()
 	})
 }
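For context, TerminalFormat is typically installed as a handler's formatter. A hedged usage sketch; the handler wiring and import path below follow the log15-style API this package exposes and are assumptions rather than part of the diff:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/log" // assumed import path for the log package
)

func main() {
	// Assumed wiring: colored, shortened output on stderr for interactive use.
	log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(true)))

	// With the term flag now threaded through logfmt, any TerminalStringer
	// value (such as a hash) is printed in its shortened form here.
	log.Info("Example message", "key", "value")
}
```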
@@ -138,21 +145,21 @@ func LogfmtFormat() Format {
 	return FormatFunc(func(r *Record) []byte {
 		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
 		buf := &bytes.Buffer{}
-		logfmt(buf, append(common, r.Ctx...), 0)
+		logfmt(buf, append(common, r.Ctx...), 0, false)
 		return buf.Bytes()
 	})
 }
 
-func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
 	for i := 0; i < len(ctx); i += 2 {
 		if i != 0 {
 			buf.WriteByte(' ')
 		}
 
 		k, ok := ctx[i].(string)
-		v := formatLogfmtValue(ctx[i+1])
+		v := formatLogfmtValue(ctx[i+1], term)
 		if !ok {
-			k, v = errorKey, formatLogfmtValue(k)
+			k, v = errorKey, formatLogfmtValue(k, term)
 		}
 
 		// XXX: we should probably check that all of your key bytes aren't invalid
@@ -253,7 +260,7 @@ func formatJsonValue(value interface{}) interface{} {
 }
 
 // formatValue formats a value for serialization
-func formatLogfmtValue(value interface{}) string {
+func formatLogfmtValue(value interface{}, term bool) string {
 	if value == nil {
 		return "nil"
 	}
@@ -264,6 +271,12 @@ func formatLogfmtValue(value interface{}) string {
 		// expensive.
 		return t.Format(timeFormat)
 	}
+	if term {
+		if s, ok := value.(TerminalStringer); ok {
+			// Custom terminal stringer provided, use that
+			return escapeString(s.TerminalString())
+		}
+	}
 	value = formatShared(value)
 	switch v := value.(type) {
 	case bool:
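Putting the pieces together, the new `term` flag is what routes values through TerminalString for terminal output, while logfmt/file output keeps the full representation. A self-contained sketch of that dispatch, with escapeString and the rest of the real formatter omitted and a stand-in Hash type:

```go
package main

import "fmt"

type TerminalStringer interface{ TerminalString() string }

// Hash is a stand-in carrying the methods added in this commit.
type Hash [32]byte

func (h Hash) Hex() string            { return fmt.Sprintf("0x%x", h[:]) }
func (h Hash) String() string         { return h.Hex() }
func (h Hash) TerminalString() string { return fmt.Sprintf("%x…%x", h[:3], h[29:]) }

// formatValue mimics the term branch added to formatLogfmtValue above.
func formatValue(value interface{}, term bool) string {
	if term {
		if s, ok := value.(TerminalStringer); ok {
			return s.TerminalString() // shortened, terminal-only form
		}
	}
	return fmt.Sprintf("%v", value) // full form, as logfmt/file output keeps it
}

func main() {
	var head Hash
	for i := range head {
		head[i] = byte(i) // illustrative value
	}
	fmt.Println("terminal:", formatValue(head, true))  // 000102…1d1e1f
	fmt.Println("logfile: ", formatValue(head, false)) // full 0x-prefixed hex
}
```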