diff --git a/core/blockchain.go b/core/blockchain.go
index 5d5c5e6805fe2a229c5cb28cd8fe00707a7f529d..08302d05711185a6363cbbaaba595d10278064c8 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -301,7 +301,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	defer bc.chainmu.Unlock()
 
 	// Rewind the header chain, deleting all block bodies until then
-	delFn := func(db ethdb.Deleter, hash common.Hash, num uint64) {
+	delFn := func(db ethdb.Writer, hash common.Hash, num uint64) {
 		rawdb.DeleteBody(db, hash, num)
 	}
 	bc.hc.SetHead(head, delFn)
diff --git a/core/headerchain.go b/core/headerchain.go
index 027cb798fe1e1722a0e0d4205c3e614f017ec182..f005b8324cbe77c679ddda9bf5c267ca8cb19664 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -455,7 +455,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 
 // DeleteCallback is a callback function that is called by SetHead before
 // each header is deleted.
-type DeleteCallback func(ethdb.Deleter, common.Hash, uint64)
+type DeleteCallback func(ethdb.Writer, common.Hash, uint64)
 
 // SetHead rewinds the local chain to a new head. Everything above the new head
 // will be deleted and the new one set.
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index ea923f9d18ebdb64d1fc768e3be1f60e04258020..10f3ba00f0ff04dec575530acfbe071f1866e5a2 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -45,7 +45,7 @@ func WriteCanonicalHash(db ethdb.Writer, hash common.Hash, number uint64) {
 }
 
 // DeleteCanonicalHash removes the number to hash canonical mapping.
-func DeleteCanonicalHash(db ethdb.Deleter, number uint64) {
+func DeleteCanonicalHash(db ethdb.Writer, number uint64) {
 	if err := db.Delete(headerHashKey(number)); err != nil {
 		log.Crit("Failed to delete number to hash mapping", "err", err)
 	}
@@ -180,7 +180,7 @@ func WriteHeader(db ethdb.Writer, header *types.Header) {
 }
 
 // DeleteHeader removes all block header data associated with a hash.
-func DeleteHeader(db ethdb.Deleter, hash common.Hash, number uint64) {
+func DeleteHeader(db ethdb.Writer, hash common.Hash, number uint64) {
 	deleteHeaderWithoutNumber(db, hash, number)
 	if err := db.Delete(headerNumberKey(hash)); err != nil {
 		log.Crit("Failed to delete hash to number mapping", "err", err)
@@ -189,7 +189,7 @@ func DeleteHeader(db ethdb.Deleter, hash common.Hash, number uint64) {
 
 // deleteHeaderWithoutNumber removes only the block header but does not remove
 // the hash to number mapping.
-func deleteHeaderWithoutNumber(db ethdb.Deleter, hash common.Hash, number uint64) {
+func deleteHeaderWithoutNumber(db ethdb.Writer, hash common.Hash, number uint64) {
 	if err := db.Delete(headerKey(number, hash)); err != nil {
 		log.Crit("Failed to delete header", "err", err)
 	}
@@ -240,7 +240,7 @@ func WriteBody(db ethdb.Writer, hash common.Hash, number uint64, body *types.Bod
 }
 
 // DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db ethdb.Deleter, hash common.Hash, number uint64) {
+func DeleteBody(db ethdb.Writer, hash common.Hash, number uint64) {
 	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block body", "err", err)
 	}
@@ -278,7 +278,7 @@ func WriteTd(db ethdb.Writer, hash common.Hash, number uint64, td *big.Int) {
 }
 
 // DeleteTd removes all block total difficulty data associated with a hash.
-func DeleteTd(db ethdb.Deleter, hash common.Hash, number uint64) {
+func DeleteTd(db ethdb.Writer, hash common.Hash, number uint64) {
 	if err := db.Delete(headerTDKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block total difficulty", "err", err)
 	}
@@ -347,7 +347,7 @@ func WriteReceipts(db ethdb.Writer, hash common.Hash, number uint64, receipts ty
 }
 
 // DeleteReceipts removes all receipt data associated with a block hash.
-func DeleteReceipts(db ethdb.Deleter, hash common.Hash, number uint64) {
+func DeleteReceipts(db ethdb.Writer, hash common.Hash, number uint64) {
 	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
 		log.Crit("Failed to delete block receipts", "err", err)
 	}
@@ -378,7 +378,7 @@ func WriteBlock(db ethdb.Writer, block *types.Block) {
 }
 
 // DeleteBlock removes all block data associated with a hash.
-func DeleteBlock(db ethdb.Deleter, hash common.Hash, number uint64) {
+func DeleteBlock(db ethdb.Writer, hash common.Hash, number uint64) {
 	DeleteReceipts(db, hash, number)
 	DeleteHeader(db, hash, number)
 	DeleteBody(db, hash, number)
@@ -387,7 +387,7 @@ func DeleteBlock(db ethdb.Deleter, hash common.Hash, number uint64) {
 
 // deleteBlockWithoutNumber removes all block data associated with a hash, except
 // the hash to number mapping.
-func deleteBlockWithoutNumber(db ethdb.Deleter, hash common.Hash, number uint64) {
+func deleteBlockWithoutNumber(db ethdb.Writer, hash common.Hash, number uint64) {
 	DeleteReceipts(db, hash, number)
 	deleteHeaderWithoutNumber(db, hash, number)
 	DeleteBody(db, hash, number)
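
Why this signature loosening matters in practice: an `ethdb.Batch` satisfies the merged `ethdb.Writer` interface, so the rawdb deletion helpers above can buffer into a batch and flush in one go. A minimal sketch (hypothetical usage, assuming only the interfaces as changed in this diff):

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	hash, number := common.Hash{0x01}, uint64(42)

	// Buffer all per-block deletions in a batch and flush them together;
	// a Batch is now a Writer, so it plugs straight into the rawdb helpers.
	batch := db.NewBatch()
	rawdb.DeleteBlock(batch, hash, number)
	if err := batch.Write(); err != nil {
		panic(err)
	}
}
```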
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
index d90a430129c59088d1aba2d0b587c08bc10b3f54..5c7ad693472ab3bdf58c582736af88a49303a02e 100644
--- a/core/rawdb/accessors_indexes.go
+++ b/core/rawdb/accessors_indexes.go
@@ -54,7 +54,7 @@ func WriteTxLookupEntries(db ethdb.Writer, block *types.Block) {
 }
 
 // DeleteTxLookupEntry removes all transaction data associated with a hash.
-func DeleteTxLookupEntry(db ethdb.Deleter, hash common.Hash) {
+func DeleteTxLookupEntry(db ethdb.Writer, hash common.Hash) {
 	db.Delete(txLookupKey(hash))
 }
 
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 974df681bdac36cc4962f7d160be70c5e7210661..e19649dd46925dcc24866b1de0abfeb3ca1954b9 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -148,3 +148,9 @@ func (b *tableBatch) Write() error {
 func (b *tableBatch) Reset() {
 	b.batch.Reset()
 }
+
+// Replay replays the batch contents. Note that the replayed keys retain the
+// table prefix, since the underlying batch stores them in prefixed form.
+func (b *tableBatch) Replay(w ethdb.Writer) error {
+	return b.batch.Replay(w)
+}
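
One subtlety worth flagging in the hunk above: `tableBatch.Put` prepends the table prefix, so delegating `Replay` to the inner batch hands *prefixed* keys to the target writer. If a caller ever needs the original keys back, a small stripping wrapper along these lines could be slotted in (a hypothetical sketch; `prefixStripper` is not part of this diff):

```go
package rawdb // sketch only: this would live alongside tableBatch

import "github.com/ethereum/go-ethereum/ethdb"

// prefixStripper forwards replayed operations with the table prefix removed,
// so the destination writer sees the keys callers originally used.
type prefixStripper struct {
	w      ethdb.Writer
	prefix string
}

func (r *prefixStripper) Put(key []byte, value []byte) error {
	return r.w.Put(key[len(r.prefix):], value)
}

func (r *prefixStripper) Delete(key []byte) error {
	return r.w.Delete(key[len(r.prefix):])
}
```

With that in place, `tableBatch.Replay` could delegate via `b.batch.Replay(&prefixStripper{w: w, prefix: b.prefix})` instead of passing `w` through directly.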
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 0673de543fe28442aaa50384eec61c008ef2314f..a299cdb6472135ac21ee50ac62bfaf4aea0d2218 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -53,6 +53,11 @@ func (n *proofList) Put(key []byte, value []byte) error {
 	return nil
 }
 
+// Delete panics as proof lists are append-only.
+func (n *proofList) Delete(key []byte) error {
+	panic("not supported")
+}
+
 // StateDBs within the ethereum protocol are used to store anything
 // within the merkle trie. StateDBs take care of caching and storing
 // nested states. It's the general query interface to retrieve:
diff --git a/ethdb/batch.go b/ethdb/batch.go
index a6f015821080216c1a17adb27d5223142601648e..a9c4063546750e3be0c4711380f6d088fc6f9462 100644
--- a/ethdb/batch.go
+++ b/ethdb/batch.go
@@ -24,7 +24,6 @@ const IdealBatchSize = 100 * 1024
 // when Write is called. A batch cannot be used concurrently.
 type Batch interface {
 	Writer
-	Deleter
 
 	// ValueSize retrieves the amount of data queued up for writing.
 	ValueSize() int
@@ -32,8 +31,11 @@ type Batch interface {
 	// Write flushes any accumulated data to disk.
 	Write() error
 
-	// Reset resets the batch for reuse
+	// Reset resets the batch for reuse.
 	Reset()
+
+	// Replay replays the batch contents.
+	Replay(w Writer) error
 }
 
 // Batcher wraps the NewBatch method of a backing data store.
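
The `Replay` addition means a batch's buffered operations can be re-applied, in insertion order, to any `ethdb.Writer` without touching disk. A small self-contained sketch of that contract (`opCounter` is an illustrative type, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// opCounter implements ethdb.Writer and simply tallies replayed operations.
type opCounter struct{ puts, dels int }

func (c *opCounter) Put(key, value []byte) error { c.puts++; return nil }
func (c *opCounter) Delete(key []byte) error     { c.dels++; return nil }

func main() {
	batch := memorydb.New().NewBatch()
	batch.Put([]byte("a"), []byte("1"))
	batch.Put([]byte("b"), []byte("2"))
	batch.Delete([]byte("a"))

	// Replay forwards the three buffered ops, in order, to the counter.
	counter := new(opCounter)
	if err := batch.Replay(counter); err != nil {
		panic(err)
	}
	fmt.Println(counter.puts, counter.dels) // 2 1
}
```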
diff --git a/ethdb/database.go b/ethdb/database.go
index 30208e1468c6bb8ea0d46774af87d3487285744e..bab99aed1f62afbf84286e75d9f854c4c9462f70 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-// Package database defines the interfaces for an Ethereum data store.
+// Package ethdb defines the interfaces for an Ethereum data store.
 package ethdb
 
 import "io"
@@ -32,10 +32,7 @@ type Reader interface {
 type Writer interface {
 	// Put inserts the given value into the key-value data store.
 	Put(key []byte, value []byte) error
-}
 
-// Deleter wraps the Delete method of a backing data store.
-type Deleter interface {
 	// Delete removes the key from the key-value data store.
 	Delete(key []byte) error
 }
@@ -63,7 +60,6 @@ type Compacter interface {
 type KeyValueStore interface {
 	Reader
 	Writer
-	Deleter
 	Batcher
 	Iteratee
 	Stater
@@ -76,7 +72,6 @@ type KeyValueStore interface {
 type Database interface {
 	Reader
 	Writer
-	Deleter
 	Batcher
 	Iteratee
 	Stater
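
The net effect of folding `Deleter` into `Writer`: code that mixes puts and deletes needs only one interface value, and databases, batches, and table wrappers all qualify. A hedged sketch (`replaceKey` is a hypothetical helper, not in this diff):

```go
package main

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// replaceKey swaps one key for another on any Writer. Before this change it
// would have needed both ethdb.Writer and ethdb.Deleter in its signature.
func replaceKey(w ethdb.Writer, oldKey, newKey, value []byte) error {
	if err := w.Delete(oldKey); err != nil {
		return err
	}
	return w.Put(newKey, value)
}

func main() {
	db := memorydb.New()
	if err := db.Put([]byte("old"), []byte("v")); err != nil {
		panic(err)
	}
	if err := replaceKey(db, []byte("old"), []byte("new"), []byte("v")); err != nil {
		panic(err)
	}
}
```

The same helper works unchanged on a `Batch`, which is precisely what the rawdb deletion helpers above rely on.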
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index f730887d25dcd97dcecdaba47f7c3f45f0dcbd88..f437cb9740c8ed126d30b7914961125a58672b82 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -416,3 +416,37 @@ func (b *batch) Reset() {
 	b.b.Reset()
 	b.size = 0
 }
+
+// Replay replays the batch contents, returning the first error encountered
+// by the destination writer, if any.
+func (b *batch) Replay(w ethdb.Writer) error {
+	r := &replayer{writer: w}
+	if err := b.b.Replay(r); err != nil {
+		return err
+	}
+	return r.failure
+}
+
+// replayer is a small wrapper to implement the correct replay methods.
+type replayer struct {
+	writer  ethdb.Writer
+	failure error
+}
+
+// Put inserts the given value into the key-value data store.
+func (r *replayer) Put(key, value []byte) {
+	// If the replay already failed, stop executing ops
+	if r.failure != nil {
+		return
+	}
+	r.failure = r.writer.Put(key, value)
+}
+
+// Delete removes the key from the key-value data store.
+func (r *replayer) Delete(key []byte) {
+	// If the replay already failed, stop executing ops
+	if r.failure != nil {
+		return
+	}
+	r.failure = r.writer.Delete(key)
+}
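
For context on the wrapper above: goleveldb's `Batch.Replay` takes a `BatchReplay` whose `Put` and `Delete` return no error, which is why `replayer` captures the first failure instead of returning it per call. A runnable sketch against goleveldb's own API (assuming the `syndtr/goleveldb` package vendored here):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printer implements goleveldb's BatchReplay interface; note the error-less
// method signatures that force the failure-capturing wrapper above.
type printer struct{}

func (printer) Put(key, value []byte) { fmt.Printf("put %q=%q\n", key, value) }
func (printer) Delete(key []byte)     { fmt.Printf("del %q\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("a"), []byte("1"))
	b.Delete([]byte("b"))
	if err := b.Replay(printer{}); err != nil {
		panic(err)
	}
}
```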
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index cc2490ba2fb11e43bb33dce67674b92966a61996..5c3f7e22a31e25f860deca62d7a05310b6388e24 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -240,6 +240,22 @@ func (b *batch) Reset() {
 	b.size = 0
 }
 
+// Replay replays the batch contents.
+func (b *batch) Replay(w ethdb.Writer) error {
+	for _, keyvalue := range b.writes {
+		if keyvalue.delete {
+			if err := w.Delete(keyvalue.key); err != nil {
+				return err
+			}
+			continue
+		}
+		if err := w.Put(keyvalue.key, keyvalue.value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // iterator can walk over the (potentially partial) keyspace of a memory key
 // value store. Internally it is a deep copy of the entire iterated state,
 // sorted by keys.
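
A usage sketch of the new method, runnable against the in-memory store (assumes only the APIs in this diff): write a batch into one store, then mirror the identical operations into another via `Replay`:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	src := memorydb.New()
	dst := memorydb.New()

	// Queue a put and a delete; nothing hits either store yet.
	batch := src.NewBatch()
	batch.Put([]byte("alpha"), []byte("1"))
	batch.Delete([]byte("beta"))

	// Flush the batch into the source store...
	if err := batch.Write(); err != nil {
		panic(err)
	}
	// ...then mirror the exact same operations into the second store.
	if err := batch.Replay(dst); err != nil {
		panic(err)
	}

	value, _ := dst.Get([]byte("alpha"))
	fmt.Printf("%s\n", value) // 1
}
```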
diff --git a/light/nodeset.go b/light/nodeset.go
index 3b556108a606460af535a2b8fd16ff057fca4727..a8bf4f6c65f62656ae809b4d8be2fa14ab66f28c 100644
--- a/light/nodeset.go
+++ b/light/nodeset.go
@@ -60,6 +60,15 @@ func (db *NodeSet) Put(key []byte, value []byte) error {
 	return nil
 }
 
+// Delete removes a node from the set
+func (db *NodeSet) Delete(key []byte) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	delete(db.nodes, string(key))
+	return nil
+}
+
 // Get returns a stored node
 func (db *NodeSet) Get(key []byte) ([]byte, error) {
 	db.lock.RLock()
@@ -138,6 +147,11 @@ func (n *NodeList) Put(key []byte, value []byte) error {
 	return nil
 }
 
+// Delete panics as there's no reason to remove a node from the list.
+func (n *NodeList) Delete(key []byte) error {
+	panic("not supported")
+}
+
 // DataSize returns the aggregated data size of nodes in the list
 func (n NodeList) DataSize() int {
 	var size int
diff --git a/trie/database.go b/trie/database.go
index 6df1a7f79b7835ce1b4e5cb533bb26a238ce1e16..3bbcb6ade90c8eac6d139ab0b426d2166cde7e32 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -59,6 +59,11 @@ const secureKeyLength = 11 + 32
 // Database is an intermediate write layer between the trie data structures and
 // the disk database. The aim is to accumulate trie writes in-memory and only
 // periodically flush a couple tries to disk, garbage collecting the remainder.
+//
+// Note, the trie Database is **not** thread safe in its mutations, but it **is**
+// thread safe in providing individual, independent node access. The rationale
+// behind this split design is to provide read access to RPC handlers and sync
+// servers even while the trie is executing expensive garbage collection.
 type Database struct {
 	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes
 
@@ -465,8 +470,8 @@ func (db *Database) Nodes() []common.Hash {
 
 // Reference adds a new reference from a parent node to a child node.
 func (db *Database) Reference(child common.Hash, parent common.Hash) {
-	db.lock.RLock()
-	defer db.lock.RUnlock()
+	db.lock.Lock()
+	defer db.lock.Unlock()
 
 	db.reference(child, parent)
 }
@@ -561,13 +566,14 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
 
 // Cap iteratively flushes old but still referenced trie nodes until the total
 // memory usage goes below the given threshold.
+//
+// Note, this method is a non-synchronized mutator. It is unsafe to call this
+// concurrently with other mutators.
 func (db *Database) Cap(limit common.StorageSize) error {
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
 	// by only uncaching existing data when the database write finalizes.
-	db.lock.RLock()
-
 	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
 	batch := db.diskdb.NewBatch()
 
@@ -583,12 +589,10 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		for hash, preimage := range db.preimages {
 			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
 				log.Error("Failed to commit preimage from trie database", "err", err)
-				db.lock.RUnlock()
 				return err
 			}
 			if batch.ValueSize() > ethdb.IdealBatchSize {
 				if err := batch.Write(); err != nil {
-					db.lock.RUnlock()
 					return err
 				}
 				batch.Reset()
@@ -601,14 +605,12 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		// Fetch the oldest referenced node and push into the batch
 		node := db.dirties[oldest]
 		if err := batch.Put(oldest[:], node.rlp()); err != nil {
-			db.lock.RUnlock()
 			return err
 		}
 		// If we exceeded the ideal batch size, commit and reset
 		if batch.ValueSize() >= ethdb.IdealBatchSize {
 			if err := batch.Write(); err != nil {
 				log.Error("Failed to write flush list to disk", "err", err)
-				db.lock.RUnlock()
 				return err
 			}
 			batch.Reset()
@@ -623,11 +625,8 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// Flush out any remainder data from the last batch
 	if err := batch.Write(); err != nil {
 		log.Error("Failed to write flush list to disk", "err", err)
-		db.lock.RUnlock()
 		return err
 	}
-	db.lock.RUnlock()
-
 	// Write successful, clear out the flushed data
 	db.lock.Lock()
 	defer db.lock.Unlock()
@@ -661,16 +660,16 @@ func (db *Database) Cap(limit common.StorageSize) error {
 }
 
 // Commit iterates over all the children of a particular node, writes them out
-// to disk, forcefully tearing down all references in both directions.
+// to disk, forcefully tearing down all references in both directions. As a side
+// effect, all pre-images accumulated up to this point are also written.
 //
-// As a side effect, all pre-images accumulated up to this point are also written.
+// Note, this method is a non-synchronized mutator. It is unsafe to call this
+// concurrently with other mutators.
 func (db *Database) Commit(node common.Hash, report bool) error {
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
 	// by only uncaching existing data when the database write finalizes.
-	db.lock.RLock()
-
 	start := time.Now()
 	batch := db.diskdb.NewBatch()
 
@@ -678,41 +677,47 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 	for hash, preimage := range db.preimages {
 		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
 			log.Error("Failed to commit preimage from trie database", "err", err)
-			db.lock.RUnlock()
 			return err
 		}
+		// If the batch is too large, flush to disk
 		if batch.ValueSize() > ethdb.IdealBatchSize {
 			if err := batch.Write(); err != nil {
-				db.lock.RUnlock()
 				return err
 			}
 			batch.Reset()
 		}
 	}
+	// Since we're going to replay trie node writes into the clean cache, flush out
+	// any batched pre-images before continuing.
+	if err := batch.Write(); err != nil {
+		return err
+	}
+	batch.Reset()
+
 	// Move the trie itself into the batch, flushing if enough data is accumulated
 	nodes, storage := len(db.dirties), db.dirtiesSize
-	if err := db.commit(node, batch); err != nil {
+
+	uncacher := &cleaner{db}
+	if err := db.commit(node, batch, uncacher); err != nil {
 		log.Error("Failed to commit trie from trie database", "err", err)
-		db.lock.RUnlock()
 		return err
 	}
-	// Write batch ready, unlock for readers during persistence
+	// Trie mostly committed to disk, flush any batch leftovers
 	if err := batch.Write(); err != nil {
 		log.Error("Failed to write trie to disk", "err", err)
-		db.lock.RUnlock()
 		return err
 	}
-	db.lock.RUnlock()
-
-	// Write successful, clear out the flushed data
+	// Uncache any leftovers in the last batch
 	db.lock.Lock()
 	defer db.lock.Unlock()
 
+	batch.Replay(uncacher)
+	batch.Reset()
+
+	// Reset the storage counters and bump the metrics
 	db.preimages = make(map[common.Hash][]byte)
 	db.preimagesSize = 0
 
-	db.uncache(node)
-
 	memcacheCommitTimeTimer.Update(time.Since(start))
 	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
 	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
@@ -732,14 +737,14 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 }
 
 // commit is the private locked version of Commit.
-func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error {
+func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 	// If the node does not exist, it's a previously committed node
 	node, ok := db.dirties[hash]
 	if !ok {
 		return nil
 	}
 	for _, child := range node.childs() {
-		if err := db.commit(child, batch); err != nil {
+		if err := db.commit(child, batch, uncacher); err != nil {
 			return err
 		}
 	}
@@ -751,39 +756,59 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error {
 		if err := batch.Write(); err != nil {
 			return err
 		}
+		db.lock.Lock()
+		batch.Replay(uncacher)
 		batch.Reset()
+		db.lock.Unlock()
 	}
 	return nil
 }
 
-// uncache is the post-processing step of a commit operation where the already
-// persisted trie is removed from the cache. The reason behind the two-phase
-// commit is to ensure consistent data availability while moving from memory
-// to disk.
-func (db *Database) uncache(hash common.Hash) {
+// cleaner is a database batch replayer that takes a batch of write operations
+// and removes anything written to disk from the trie database's dirty cache.
+type cleaner struct {
+	db *Database
+}
+
+// Put reacts to database writes and implements dirty data uncaching. This is the
+// post-processing step of a commit operation where the already persisted trie is
+// removed from the dirty cache and moved into the clean cache. The reason behind
+// the two-phase commit is to ensure data availability while moving from
+// memory to disk.
+func (c *cleaner) Put(key []byte, rlp []byte) error {
+	hash := common.BytesToHash(key)
+
 	// If the node does not exist, we're done on this path
-	node, ok := db.dirties[hash]
+	node, ok := c.db.dirties[hash]
 	if !ok {
-		return
+		return nil
 	}
 	// Node still exists, remove it from the flush-list
 	switch hash {
-	case db.oldest:
-		db.oldest = node.flushNext
-		db.dirties[node.flushNext].flushPrev = common.Hash{}
-	case db.newest:
-		db.newest = node.flushPrev
-		db.dirties[node.flushPrev].flushNext = common.Hash{}
+	case c.db.oldest:
+		c.db.oldest = node.flushNext
+		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
+	case c.db.newest:
+		c.db.newest = node.flushPrev
+		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
 	default:
-		db.dirties[node.flushPrev].flushNext = node.flushNext
-		db.dirties[node.flushNext].flushPrev = node.flushPrev
+		c.db.dirties[node.flushPrev].flushNext = node.flushNext
+		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
 	}
-	// Uncache the node's subtries and remove the node itself too
-	for _, child := range node.childs() {
-		db.uncache(child)
+	// Remove the node from the dirty cache
+	delete(c.db.dirties, hash)
+	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
+
+	// Move the flushed node into the clean cache to prevent insta-reloads
+	if c.db.cleans != nil {
+		c.db.cleans.Set(string(hash[:]), rlp)
 	}
-	delete(db.dirties, hash)
-	db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
+	return nil
+}
+
+// Delete panics as a commit should never produce deletions.
+func (c *cleaner) Delete(key []byte) error {
+	panic("not supported")
 }
 
 // Size returns the current storage size of the memory cache in front of the
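
Pulling the trie/database.go changes together: `Commit` now streams nodes into a disk batch, and every time the batch flushes it is replayed into the `cleaner`, which evicts exactly the persisted nodes from the dirty cache under lock. A simplified, self-contained sketch of that two-phase pattern (`dirtySet` and `uncacher` are illustrative stand-ins, not the geth types):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// dirtySet is a stand-in for the trie database's dirty-node cache.
type dirtySet map[string][]byte

// uncacher implements ethdb.Writer; Put drops the persisted key from the
// dirty set, mirroring what cleaner.Put does in trie/database.go.
type uncacher struct{ dirties dirtySet }

func (u *uncacher) Put(key, value []byte) error {
	delete(u.dirties, string(key))
	return nil
}

func (u *uncacher) Delete(key []byte) error {
	panic("not supported") // commits never delete
}

func main() {
	disk := memorydb.New()
	dirties := dirtySet{"node1": []byte("rlp1"), "node2": []byte("rlp2")}

	batch := disk.NewBatch()
	for key, rlp := range dirties {
		batch.Put([]byte(key), rlp)
	}
	// Phase 1: persist. Readers still see the nodes in the dirty set, so
	// there is never a window where a node exists in neither place.
	if err := batch.Write(); err != nil {
		panic(err)
	}
	// Phase 2: uncache exactly what was flushed, by replaying the batch.
	if err := batch.Replay(&uncacher{dirties}); err != nil {
		panic(err)
	}
	fmt.Println(len(dirties)) // 0
}
```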