diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 1f6f2bb2955e0ce0c7115cb9fb4b990b20352860..bf5e81d45e45bb9bc813469f95bc3b17dd799221 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -91,6 +91,11 @@ func init() {
 		utils.BootnodesFlag,
 		utils.DataDirFlag,
 		utils.KeyStoreDirFlag,
+		utils.EthashCacheDirFlag,
+		utils.EthashCachesInMemoryFlag,
+		utils.EthashCachesOnDiskFlag,
+		utils.EthashDatasetDirFlag,
+		utils.EthashDatasetsOnDiskFlag,
 		utils.FastSyncFlag,
 		utils.LightModeFlag,
 		utils.LightServFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 9349857e91503658e4e6f5df47202757e4d0ef87..34910daa55f0005368c18965641ffda5ed46e670 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -77,6 +77,16 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.LightKDFFlag,
 		},
 	},
+	{
+		Name: "ETHASH",
+		Flags: []cli.Flag{
+			utils.EthashCacheDirFlag,
+			utils.EthashCachesInMemoryFlag,
+			utils.EthashCachesOnDiskFlag,
+			utils.EthashDatasetDirFlag,
+			utils.EthashDatasetsOnDiskFlag,
+		},
+	},
 	{
 		Name: "PERFORMANCE TUNING",
 		Flags: []cli.Flag{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 00599e82a4d7ea6c7ffff6c7bb5bc6eb1f2c4bed..b92be84640ac526bbaf4f7c0e3db862e5aa1cec7 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -23,6 +23,7 @@ import (
 	"io/ioutil"
 	"math/big"
 	"os"
+	"os/user"
 	"path/filepath"
 	"runtime"
 	"strconv"
@@ -113,6 +114,29 @@ var (
 		Name:  "keystore",
 		Usage: "Directory for the keystore (default = inside the datadir)",
 	}
+	EthashCacheDirFlag = DirectoryFlag{
+		Name:  "ethash.cachedir",
+		Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
+	}
+	EthashCachesInMemoryFlag = cli.IntFlag{
+		Name:  "ethash.cachesinmem",
+		Usage: "Number of recent ethash caches to keep in memory (16MB each)",
+		Value: 2,
+	}
+	EthashCachesOnDiskFlag = cli.IntFlag{
+		Name:  "ethash.cachesondisk",
+		Usage: "Number of recent ethash caches to keep on disk (16MB each)",
+		Value: 3,
+	}
+	EthashDatasetDirFlag = DirectoryFlag{
+		Name:  "ethash.dagdir",
+		Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
+	}
+	EthashDatasetsOnDiskFlag = cli.IntFlag{
+		Name:  "ethash.dagsondisk",
+		Usage: "Number of ethash mining DAGs to keep on disk (1+GB each)",
+		Value: 2,
+	}
 	NetworkIdFlag = cli.IntFlag{
 		Name:  "networkid",
 		Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)",
@@ -431,6 +455,36 @@ func MakeDataDir(ctx *cli.Context) string {
 	return ""
 }
 
+// MakeEthashCacheDir returns the directory to use for storing the ethash cache
+// dumps.
+func MakeEthashCacheDir(ctx *cli.Context) string {
+	if ctx.GlobalIsSet(EthashCacheDirFlag.Name) && ctx.GlobalString(EthashCacheDirFlag.Name) == "" {
+		return ""
+	}
+	if !ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
+		return "ethash"
+	}
+	return ctx.GlobalString(EthashCacheDirFlag.Name)
+}
+
+// MakeEthashDatasetDir returns the directory to use for storing the full ethash
+// dataset dumps.
+func MakeEthashDatasetDir(ctx *cli.Context) string {
+	if !ctx.GlobalIsSet(EthashDatasetDirFlag.Name) {
+		home := os.Getenv("HOME")
+		if home == "" {
+			if usr, err := user.Current(); err == nil {
+				home = usr.HomeDir
+			}
+		}
+		if runtime.GOOS == "windows" {
+			return filepath.Join(home, "AppData", "Ethash")
+		}
+		return filepath.Join(home, ".ethash")
+	}
+	return ctx.GlobalString(EthashDatasetDirFlag.Name)
+}
+
 // MakeIPCPath creates an IPC path configuration from the set command line flags,
 // returning an empty string if IPC was explicitly disabled, or the set path.
 func MakeIPCPath(ctx *cli.Context) string {
@@ -751,6 +805,11 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		GpobaseStepUp:           ctx.GlobalInt(GpobaseStepUpFlag.Name),
 		GpobaseCorrectionFactor: ctx.GlobalInt(GpobaseCorrectionFactorFlag.Name),
 		SolcPath:                ctx.GlobalString(SolcPathFlag.Name),
+		EthashCacheDir:          MakeEthashCacheDir(ctx),
+		EthashCachesInMem:       ctx.GlobalInt(EthashCachesInMemoryFlag.Name),
+		EthashCachesOnDisk:      ctx.GlobalInt(EthashCachesOnDiskFlag.Name),
+		EthashDatasetDir:        MakeEthashDatasetDir(ctx),
+		EthashDatasetsOnDisk:    ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name),
 		AutoDAG:                 ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
 		EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name),
 	}
@@ -923,7 +982,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 
 	seal := pow.PoW(pow.FakePow{})
 	if !ctx.GlobalBool(FakePoWFlag.Name) {
-		seal = pow.NewFullEthash("", "")
+		seal = pow.NewFullEthash("", 1, 0, "", 0)
 	}
 	chain, err = core.NewBlockChain(chainDb, chainConfig, seal, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)})
 	if err != nil {
diff --git a/eth/backend.go b/eth/backend.go
index 024520b13030a9c70e0dacf9189605f642529f25..d49251d75c3da310ca6ff5f4957353aee3d623fb 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -84,6 +84,12 @@ type Config struct {
 	PowShared bool
 	ExtraData []byte
 
+	EthashCacheDir       string
+	EthashCachesInMem    int
+	EthashCachesOnDisk   int
+	EthashDatasetDir     string
+	EthashDatasetsOnDisk int
+
 	Etherbase    common.Address
 	GasPrice     *big.Int
 	MinerThreads int
@@ -157,16 +163,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	if err := SetupGenesisBlock(&chainDb, config); err != nil {
 		return nil, err
 	}
-	pow, err := CreatePoW(config)
-	if err != nil {
-		return nil, err
-	}
-
 	eth := &Ethereum{
 		chainDb:        chainDb,
 		eventMux:       ctx.EventMux,
 		accountManager: ctx.AccountManager,
-		pow:            pow,
+		pow:            CreatePoW(ctx, config),
 		shutdownChan:   make(chan bool),
 		stopDbUpgrade:  stopDbUpgrade,
 		netVersionId:   config.NetworkId,
@@ -284,19 +285,20 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
 }
 
 // CreatePoW creates the required type of PoW instance for an Ethereum service
-func CreatePoW(config *Config) (pow.PoW, error) {
+func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
 	switch {
 	case config.PowFake:
 		log.Warn("Ethash used in fake mode")
-		return pow.FakePow{}, nil
+		return pow.FakePow{}
 	case config.PowTest:
 		log.Warn("Ethash used in test mode")
-		return pow.NewTestEthash(), nil
+		return pow.NewTestEthash()
 	case config.PowShared:
 		log.Warn("Ethash used in shared mode")
-		return pow.NewSharedEthash(), nil
+		return pow.NewSharedEthash()
 	default:
-		return pow.NewFullEthash("", ""), nil
+		return pow.NewFullEthash(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
+			config.EthashDatasetDir, config.EthashDatasetsOnDisk)
 	}
 }
 
diff --git a/les/backend.go b/les/backend.go
index e96fc9df0b8ada4d338e23045d490c6d3c9acc28..404728c0e153f0a475bee06e46abd244c4277f88 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -77,11 +77,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 	if err := eth.SetupGenesisBlock(&chainDb, config); err != nil {
 		return nil, err
 	}
-	pow, err := eth.CreatePoW(config)
-	if err != nil {
-		return nil, err
-	}
-
 	odr := NewLesOdr(chainDb)
 	relay := NewLesTxRelay()
 	eth := &LightEthereum{
@@ -90,7 +85,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 		chainDb:        chainDb,
 		eventMux:       ctx.EventMux,
 		accountManager: ctx.AccountManager,
-		pow:            pow,
+		pow:            eth.CreatePoW(ctx, config),
 		shutdownChan:   make(chan bool),
 		netVersionId:   config.NetworkId,
 		solcPath:       config.SolcPath,
diff --git a/node/service.go b/node/service.go
index 1cd1fe808e62c75493b39cff0c3fabb21e3bcc8f..5e1eb0e6454585dbb924369540978d20176af64e 100644
--- a/node/service.go
+++ b/node/service.go
@@ -46,6 +46,13 @@ func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int) (et
 	return ethdb.NewLDBDatabase(ctx.config.resolvePath(name), cache, handles)
 }
 
+// ResolvePath resolves a user path into the data directory if that was relative
+// and if the user actually uses persistent storage. It will return an empty string
+// for ephemeral storage and the user's own input for absolute paths.
+func (ctx *ServiceContext) ResolvePath(path string) string {
+	return ctx.config.resolvePath(path)
+}
+
 // Service retrieves a currently running service registered of a specific type.
 func (ctx *ServiceContext) Service(service interface{}) error {
 	element := reflect.ValueOf(service).Elem()
diff --git a/pow/ethash.go b/pow/ethash.go
index b22c65e0bd844a0e7fb7668f945cb7f454e0e2a7..602f9324f8282d1b492e687382345391797ff3b6 100644
--- a/pow/ethash.go
+++ b/pow/ethash.go
@@ -17,12 +17,19 @@
 package pow
 
 import (
+	"bufio"
 	"bytes"
 	"errors"
+	"fmt"
+	"io/ioutil"
 	"math/big"
+	"os"
+	"path/filepath"
 	"sync"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/log"
 	metrics "github.com/rcrowley/go-metrics"
 )
@@ -36,6 +43,15 @@ var (
 var (
 	// maxUint256 is a big integer representing 2^256-1
 	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
+
+	// sharedEthash is a full instance that can be shared between multiple users.
+	sharedEthash = NewFullEthash("", 3, 0, "", 0)
+
+	// algorithmRevision is the data structure version used for file naming.
+	algorithmRevision = 23
+
+	// dumpMagic is a dataset dump header to sanity check a data dump.
+	dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
 )
 
 // cache wraps an ethash cache with some metadata to allow easier concurrent use.
@@ -48,21 +64,65 @@ type cache struct {
 }
 
 // generate ensures that the cache content is generates.
-func (c *cache) generate(test bool) {
+func (c *cache) generate(dir string, limit int, test bool) {
 	c.once.Do(func() {
-		cacheSize := cacheSize(c.epoch*epochLength + 1)
+		// If we have a testing cache, generate and return
 		if test {
-			cacheSize = 1024
+			rawCache := generateCache(1024, seedHash(c.epoch*epochLength+1))
+			c.cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
+			return
+		}
+		// Full cache generation is needed, check cache dir for existing data
+		size := cacheSize(c.epoch*epochLength + 1)
+		seed := seedHash(c.epoch*epochLength + 1)
+
+		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
+		logger := log.New("seed", hexutil.Bytes(seed))
+
+		if dir != "" {
+			dump, err := os.Open(path)
+			if err == nil {
+				logger.Info("Loading ethash cache from disk")
+				start := time.Now()
+				c.cache = prepare(size, bufio.NewReader(dump))
+				logger.Info("Loaded ethash cache from disk", "elapsed", common.PrettyDuration(time.Since(start)))
+
+				dump.Close()
+				return
+			}
+		}
+		// No previous disk cache was available, generate on the fly
+		rawCache := generateCache(size, seed)
+		c.cache = prepare(size, bytes.NewReader(rawCache))
+
+		// If a cache directory is given, attempt to serialize for next time
+		if dir != "" {
+			// Store the ethash cache to disk
+			start := time.Now()
+			if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
+				logger.Error("Failed to create ethash cache dir", "err", err)
+			} else if err := ioutil.WriteFile(path, rawCache, os.ModePerm); err != nil {
+				logger.Error("Failed to write ethash cache to disk", "err", err)
+			} else {
+				logger.Info("Stored ethash cache to disk", "elapsed", common.PrettyDuration(time.Since(start)))
+			}
+			// Iterate over all previous instances and delete old ones
+			for ep := int(c.epoch) - limit; ep >= 0; ep-- {
+				seed := seedHash(uint64(ep)*epochLength + 1)
+				path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
+				os.Remove(path)
+			}
 		}
-		rawCache := generateCache(cacheSize, seedHash(c.epoch*epochLength+1))
-		c.cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
 	})
 }
 
 // Ethash is a PoW data struture implementing the ethash algorithm.
 type Ethash struct {
-	cachedir string // Data directory to store the verification caches
-	dagdir   string // Data directory to store full mining datasets
+	cachedir     string // Data directory to store the verification caches
+	cachesinmem  int    // Number of caches to keep in memory
+	cachesondisk int    // Number of caches to keep on disk
+	dagdir       string // Data directory to store full mining datasets
+	dagsondisk   int    // Number of mining datasets to keep on disk
 
 	caches map[uint64]*cache // In memory caches to avoid regenerating too often
 	future *cache            // Pre-generated cache for the estimated future epoch
@@ -71,15 +131,27 @@ type Ethash struct {
 	hashrate *metrics.StandardMeter // Meter tracking the average hashrate
 
 	tester bool // Flag whether to use a smaller test dataset
-	shared bool // Flag whether to use a global chared dataset
 }
 
 // NewFullEthash creates a full sized ethash PoW scheme.
-func NewFullEthash(cachedir, dagdir string) PoW {
+func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsondisk int) PoW {
+	if cachesinmem <= 0 {
+		log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
+		cachesinmem = 1
+	}
+	if cachedir != "" && cachesondisk > 0 {
+		log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
+	}
+	if dagdir != "" && dagsondisk > 0 {
+		log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
+	}
 	return &Ethash{
-		cachedir: cachedir,
-		dagdir:   dagdir,
-		caches:   make(map[uint64]*cache),
+		cachedir:     cachedir,
+		cachesinmem:  cachesinmem,
+		cachesondisk: cachesondisk,
+		dagdir:       dagdir,
+		dagsondisk:   dagsondisk,
+		caches:       make(map[uint64]*cache),
 	}
 }
 
@@ -87,18 +159,16 @@ func NewFullEthash(cachedir, dagdir string) PoW {
 // purposes.
 func NewTestEthash() PoW {
 	return &Ethash{
-		caches: make(map[uint64]*cache),
-		tester: true,
+		cachesinmem: 1,
+		caches:      make(map[uint64]*cache),
+		tester:      true,
 	}
 }
 
 // NewSharedEthash creates a full sized ethash PoW shared between all requesters
 // running in the same process.
 func NewSharedEthash() PoW {
-	return &Ethash{
-		caches: make(map[uint64]*cache),
-		shared: true,
-	}
+	return sharedEthash
 }
 
 // Verify implements PoW, checking whether the given block satisfies the PoW
@@ -140,7 +210,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 	current, future := ethash.caches[epoch], (*cache)(nil)
 	if current == nil {
 		// No in-memory cache, evict the oldest if the cache limit was reached
-		for len(ethash.caches) >= 3 {
+		for len(ethash.caches) >= ethash.cachesinmem {
 			var evict *cache
 			for _, cache := range ethash.caches {
 				if evict == nil || evict.used.After(cache.used) {
@@ -149,21 +219,21 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 			}
 			delete(ethash.caches, evict.epoch)
 
-			log.Debug("Evictinged ethash cache", "old", evict.epoch, "used", evict.used)
+			log.Debug("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
 		}
 		// If we have the new cache pre-generated, use that, otherwise create a new one
 		if ethash.future != nil && ethash.future.epoch == epoch {
 			log.Debug("Using pre-generated cache", "epoch", epoch)
 			current, ethash.future = ethash.future, nil
 		} else {
-			log.Debug("Generating new ethash cache", "epoch", epoch)
+			log.Debug("Requiring new ethash cache", "epoch", epoch)
 			current = &cache{epoch: epoch}
 		}
 		ethash.caches[epoch] = current
 
 		// If we just used up the future cache, or need a refresh, regenerate
 		if ethash.future == nil || ethash.future.epoch <= epoch {
-			log.Debug("Pre-generating cache for the future", "epoch", epoch+1)
+			log.Debug("Requiring new future ethash cache", "epoch", epoch+1)
 			future = &cache{epoch: epoch + 1}
 			ethash.future = future
 		}
@@ -172,16 +242,15 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 	ethash.lock.Unlock()
 
 	// Wait for generation finish, bump the timestamp and finalize the cache
-	current.once.Do(func() {
-		current.generate(ethash.tester)
-	})
+	current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
+
 	current.lock.Lock()
 	current.used = time.Now()
 	current.lock.Unlock()
 
 	// If we exhusted the future cache, now's a goot time to regenerate it
 	if future != nil {
-		go future.generate(ethash.tester)
+		go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
 	}
 	return current.cache
 }
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index fcd6af995241fb54bc035469521fab8b92bf8168..f6d05880a2dbbb6bb8080ffcc1ef2f93397ca361 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -45,12 +45,6 @@ const (
 	loopAccesses       = 64      // Number of accesses in hashimoto loop
 )
 
-var (
-	// Metadata fields to be compatible with the C++ ethash
-	ethashRevision = 23                                       // Data structure version
-	ethashMagic    = hexutil.MustDecode("0xfee1deadbaddcafe") // Dataset dump magic number
-)
-
 // cacheSize calculates and returns the size of the ethash verification cache that
 // belongs to a certain block number. The cache size grows linearly, however, we
 // always take the highest prime below the linearly growing threshold in order to
@@ -108,16 +102,33 @@ func seedHash(block uint64) []byte {
 // set of 524288 64-byte values.
 func generateCache(size uint64, seed []byte) []byte {
 	// Print some debug logs to allow analysis on low end devices
-	logger := log.New("size", size, "seed", hexutil.Bytes(seed))
-	logger.Debug("Generating ethash cache")
+	logger := log.New("seed", hexutil.Bytes(seed))
+	logger.Debug("Generating ethash verification cache")
 
-	defer func(start time.Time) {
-		logger.Debug("Generated ethash cache", "elapsed", common.PrettyDuration(time.Since(start)))
-	}(time.Now())
+	start := time.Now()
+	defer func() {
+		logger.Info("Generated ethash verification cache", "elapsed", common.PrettyDuration(time.Since(start)))
+	}()
 
 	// Calculate the number of thoretical rows (we'll store in one buffer nonetheless)
 	rows := int(size) / hashBytes
 
+	// Start a monitoring goroutine to report progress on low end devices
+	var progress uint32
+
+	done := make(chan struct{})
+	defer close(done)
+
+	go func() {
+		for {
+			select {
+			case <-done:
+				return
+			case <-time.After(3 * time.Second):
+				logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
+			}
+		}
+	}()
 	// Create a hasher to reuse between invocations
 	keccak512 := crypto.Keccak512Hasher()
 
@@ -126,6 +137,7 @@ func generateCache(size uint64, seed []byte) []byte {
 	copy(cache, keccak512(seed))
 	for offset := uint64(hashBytes); offset < size; offset += hashBytes {
 		copy(cache[offset:], keccak512(cache[offset-hashBytes:offset]))
+		atomic.AddUint32(&progress, 1)
 	}
 	// Use a low-round version of randmemohash
 	temp := make([]byte, hashBytes)
@@ -139,6 +151,8 @@ func generateCache(size uint64, seed []byte) []byte {
 			)
 			xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
 			copy(cache[dstOff:], keccak512(temp))
+
+			atomic.AddUint32(&progress, 1)
 		}
 	}
 	return cache