From 2f28a12cdbee3e5c48ca5f44b128e639c60f3685 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Mon, 27 Feb 2017 17:06:40 +0200
Subject: [PATCH] common, eth/downloader, log: support terminal log formatting

---
 common/types.go              | 18 ++++++++++++++++++
 eth/downloader/downloader.go | 34 +++++++++++++++-------------------
 eth/downloader/queue.go      | 18 +++++++++---------
 log/format.go                | 25 +++++++++++++++++++------
 4 files changed, 61 insertions(+), 34 deletions(-)

diff --git a/common/types.go b/common/types.go
index d38ea7306..1585d878c 100644
--- a/common/types.go
+++ b/common/types.go
@@ -55,6 +55,24 @@ func (h Hash) Bytes() []byte { return h[:] }
 func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
 func (h Hash) Hex() string   { return hexutil.Encode(h[:]) }
 
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (h Hash) TerminalString() string {
+	return fmt.Sprintf("%x…%x", h[:3], h[29:])
+}
+
+// String implements the fmt.Stringer interface and is also used by the logger
+// when doing full logging into a file.
+func (h Hash) String() string {
+	return h.Hex()
+}
+
+// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
+// without going through the stringer interface used for logging.
+func (h Hash) Format(s fmt.State, c rune) {
+	fmt.Fprintf(s, "%"+string(c), h[:])
+}
+
 // UnmarshalJSON parses a hash from its hex form.
 func (h *Hash) UnmarshalJSON(input []byte) error {
 	return hexutil.UnmarshalJSON("Hash", input, h[:])
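
Taken together, the three methods added above give Hash two rendering paths: TerminalString produces the abbreviated form the terminal logger prints, String keeps the full hex for file logging, and Format makes explicit fmt verbs such as %x operate on the raw bytes rather than the stringer. The following is a minimal, self-contained sketch of that interplay; the local Hash type and sample bytes are illustrative stand-ins, not the real common.Hash:

package main

import "fmt"

// Hash mirrors common.Hash for illustration only: a fixed 32-byte array.
type Hash [32]byte

// TerminalString abbreviates the hash for terminal logging: the first three
// and last three bytes, joined by an ellipsis.
func (h Hash) TerminalString() string {
	return fmt.Sprintf("%x…%x", h[:3], h[29:])
}

// String returns the full hex encoding, used for file logging and anything
// that goes through the stringer interface.
func (h Hash) String() string {
	return fmt.Sprintf("0x%x", h[:])
}

// Format routes explicit fmt verbs straight to the raw bytes, bypassing String.
func (h Hash) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%"+string(c), h[:])
}

func main() {
	var h Hash
	for i := range h {
		h[i] = byte(i)
	}
	fmt.Println(h.TerminalString()) // 000102…1d1e1f
	fmt.Println(h.String())         // full 0x-prefixed hex (64 characters)
	fmt.Printf("%x\n", h)           // raw bytes via Format, also 64 hex characters
}

This is why the call sites below can pass hashes directly instead of slicing hash.Hex()[2:10] by hand.
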
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 92e42f0b3..be3d8d177 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -383,7 +383,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 		return errTooOld
 	}
 
-	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash.Hex()[2:10], "td", td, "mode", d.mode)
+	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
 	defer func(start time.Time) {
 		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
 	}(time.Now())
@@ -544,7 +544,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 				return nil, errBadPeer
 			}
 			head := headers[0]
-			p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash().Hex()[2:10])
+			p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
 			return head, nil
 
 		case <-timeout:
@@ -657,10 +657,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	// If the head fetch already found an ancestor, return
 	if !common.EmptyHash(hash) {
 		if int64(number) <= floor {
-			p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash.Hex()[2:10], "allowance", floor)
+			p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
 			return 0, errInvalidAncestor
 		}
-		p.logger.Debug("Found common ancestor", "number", number, "hash", hash.Hex()[2:10])
+		p.logger.Debug("Found common ancestor", "number", number, "hash", hash)
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
@@ -704,7 +704,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 				}
 				header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
 				if header.Number.Uint64() != check {
-					p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash().Hex()[2:10], "request", check)
+					p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
 					return 0, errBadPeer
 				}
 				start = check
@@ -722,10 +722,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	}
 	// Ensure valid ancestry and return
 	if int64(start) <= floor {
-		p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash.Hex()[2:10], "allowance", floor)
+		p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
 		return 0, errInvalidAncestor
 	}
-	p.logger.Debug("Found common ancestor", "number", start, "hash", hash.Hex()[2:10])
+	p.logger.Debug("Found common ancestor", "number", start, "hash", hash)
 	return start, nil
 }
 
@@ -1208,7 +1208,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
 					for _, header := range rollback {
 						if header.Number.Uint64() == pivot {
-							log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash().Hex()[2:10])
+							log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash())
 							d.fsPivotLock = header
 						}
 					}
@@ -1304,7 +1304,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 						if n > 0 {
 							rollback = append(rollback, chunk[:n]...)
 						}
-						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "err", err)
+						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
 						return errInvalidChain
 					}
 					// All verifications passed, store newly found uncertain headers
@@ -1316,7 +1316,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
 				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
 					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-						log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash().Hex()[2:10], "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash().Hex()[2:10])
+						log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash(), "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash())
 						return errInvalidChain
 					}
 				}
@@ -1366,13 +1366,9 @@ func (d *Downloader) processContent() error {
 		// Actually import the blocks
 		first, last := results[0].Header, results[len(results)-1].Header
 		log.Debug("Inserting downloaded chain", "items", len(results),
-			"from", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("#%d [%x…]", first.Number, first.Hash().Bytes()[:4])
-			}},
-			"till", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("#%d [%x…]", last.Number, last.Hash().Bytes()[:4])
-			}})
-
+			"firstnum", first.Number, "firsthash", first.Hash(),
+			"lastnum", last.Number, "lasthash", last.Hash(),
+		)
 		for len(results) != 0 {
 			// Check for any termination requests
 			select {
@@ -1406,14 +1402,14 @@ func (d *Downloader) processContent() error {
 			case len(receipts) > 0:
 				index, err = d.insertReceipts(blocks, receipts)
 				if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-					log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash().Hex()[2:10])
+					log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash())
 					index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
 				}
 			default:
 				index, err = d.insertBlocks(blocks)
 			}
 			if err != nil {
-				log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "err", err)
+				log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
 				return errInvalidChain
 			}
 			// Shift the results to the next batch
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 9530e15be..855097c45 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -364,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		// Make sure chain order is honoured and preserved throughout
 		hash := header.Hash()
 		if header.Number == nil || header.Number.Uint64() != from {
-			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash.Hex()[2:10], "expected", from)
+			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
 			break
 		}
 		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
-			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
 			break
 		}
 		// Make sure no duplicate requests are executed
 		if _, ok := q.blockTaskPool[hash]; ok {
-			log.Warn("Header  already scheduled for block fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
 			continue
 		}
 		if _, ok := q.receiptTaskPool[hash]; ok {
-			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
 			continue
 		}
 		// Queue the header for content retrieval
@@ -391,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		}
 		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
 			// Pivoting point of the fast sync, switch the state retrieval to this
-			log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash.Hex()[2:10])
+			log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash)
 
 			q.stateTaskIndex = 0
 			q.stateTaskPool = make(map[common.Hash]int)
@@ -872,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	accepted := len(headers) == MaxHeaderFetch
 	if accepted {
 		if headers[0].Number.Uint64() != request.From {
-			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash().Hex()[2:10], request.From)
+			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
 			accepted = false
 		} else if headers[len(headers)-1].Hash() != target {
-			log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash().Hex()[2:10], "expected", target.Hex()[2:10])
+			log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
 			accepted = false
 		}
 	}
@@ -883,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 		for i, header := range headers[1:] {
 			hash := header.Hash()
 			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
-				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10], "expected", want)
+				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
 				accepted = false
 				break
 			}
 			if headers[i].Hash() != header.ParentHash {
-				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10])
+				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
 				accepted = false
 				break
 			}
diff --git a/log/format.go b/log/format.go
index f32fcf744..e8f4b4f24 100644
--- a/log/format.go
+++ b/log/format.go
@@ -59,6 +59,13 @@ func (f formatFunc) Format(r *Record) []byte {
 	return f(r)
 }
 
+// TerminalStringer is an interface analogous to the stdlib fmt.Stringer,
+// allowing user-defined types to provide custom shortened serialization
+// formats when printed to the terminal.
+type TerminalStringer interface {
+	TerminalString() string
+}
+
 // TerminalFormat formats log records optimized for human readability on
 // a terminal with color-coded level output and terser human friendly timestamp.
 // This format should only be used for interactive programs or while developing.
@@ -124,7 +131,7 @@ func TerminalFormat(usecolor bool) Format {
 		}
 
 		// print the keys logfmt style
-		logfmt(b, r.Ctx, color)
+		logfmt(b, r.Ctx, color, true)
 		return b.Bytes()
 	})
 }
@@ -138,21 +145,21 @@ func LogfmtFormat() Format {
 	return FormatFunc(func(r *Record) []byte {
 		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
 		buf := &bytes.Buffer{}
-		logfmt(buf, append(common, r.Ctx...), 0)
+		logfmt(buf, append(common, r.Ctx...), 0, false)
 		return buf.Bytes()
 	})
 }
 
-func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
 	for i := 0; i < len(ctx); i += 2 {
 		if i != 0 {
 			buf.WriteByte(' ')
 		}
 
 		k, ok := ctx[i].(string)
-		v := formatLogfmtValue(ctx[i+1])
+		v := formatLogfmtValue(ctx[i+1], term)
 		if !ok {
-			k, v = errorKey, formatLogfmtValue(k)
+			k, v = errorKey, formatLogfmtValue(k, term)
 		}
 
 		// XXX: we should probably check that all of your key bytes aren't invalid
@@ -253,7 +260,7 @@ func formatJsonValue(value interface{}) interface{} {
 }
 
 // formatValue formats a value for serialization
-func formatLogfmtValue(value interface{}) string {
+func formatLogfmtValue(value interface{}, term bool) string {
 	if value == nil {
 		return "nil"
 	}
@@ -264,6 +271,12 @@ func formatLogfmtValue(value interface{}) string {
 		// expensive.
 		return t.Format(timeFormat)
 	}
+	if term {
+		if s, ok := value.(TerminalStringer); ok {
+			// Custom terminal stringer provided, use that
+			return escapeString(s.TerminalString())
+		}
+	}
 	value = formatShared(value)
 	switch v := value.(type) {
 	case bool:
-- 
GitLab
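
For completeness, the format.go changes are what wire the new interface into the logger: the terminal formatter threads term=true down to formatLogfmtValue, which prefers TerminalString when available, while the logfmt/file path passes term=false and keeps the full value. Below is a rough, self-contained sketch of that dispatch; formatValue and fakeHash are illustrative names rather than the real log package internals, and the escaping step (escapeString) is omitted:

package main

import "fmt"

// TerminalStringer matches the interface added to log/format.go above.
type TerminalStringer interface {
	TerminalString() string
}

// formatValue mimics the branch added to formatLogfmtValue: on a terminal,
// prefer the shortened form; otherwise fall back to Stringer / %v.
func formatValue(value interface{}, term bool) string {
	if term {
		if s, ok := value.(TerminalStringer); ok {
			return s.TerminalString()
		}
	}
	if s, ok := value.(fmt.Stringer); ok {
		return s.String()
	}
	return fmt.Sprintf("%v", value)
}

// fakeHash stands in for common.Hash, implementing both interfaces.
type fakeHash [32]byte

func (h fakeHash) TerminalString() string { return fmt.Sprintf("%x…%x", h[:3], h[29:]) }
func (h fakeHash) String() string         { return fmt.Sprintf("0x%x", h[:]) }

func main() {
	var h fakeHash
	fmt.Println(formatValue(h, true))  // abbreviated, as the terminal formatter would print it
	fmt.Println(formatValue(h, false)) // full hex, as the logfmt/file formatter would print it
}

Threading the term flag through logfmt keeps file logs lossless while interactive output stays short, which is the point of the patch.
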