diff --git a/cmd/clef/README.md b/cmd/clef/README.md
index 027c22c98f1011672c9a5f87107040af02954201..c02ac44d857aa1aee463705e20f96cdc3b1a9201 100644
--- a/cmd/clef/README.md
+++ b/cmd/clef/README.md
@@ -875,3 +875,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to
 | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: |  :+1: (partially)|
 | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: |  :x: |
 | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: |  :x: |
+| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: |  :+1: (approve tx only)|
diff --git a/cmd/clef/docs/qubes/clef_qubes_http.png b/cmd/clef/docs/qubes/clef_qubes_http.png
index a641e1987fdf691d3af2ae44fa3d61835b5c5f08..e95ad8da4af6fdd4f628ec4ea741c0b98147f00b 100644
Binary files a/cmd/clef/docs/qubes/clef_qubes_http.png and b/cmd/clef/docs/qubes/clef_qubes_http.png differ
diff --git a/cmd/clef/docs/qubes/clef_qubes_qrexec.png b/cmd/clef/docs/qubes/clef_qubes_qrexec.png
index f57fc8933ed57bc3dbcfa88a8e2e440cfb186e72..b1814e7c36720865ed35391993f3d38f3e38844d 100644
Binary files a/cmd/clef/docs/qubes/clef_qubes_qrexec.png and b/cmd/clef/docs/qubes/clef_qubes_qrexec.png differ
diff --git a/cmd/clef/docs/qubes/qrexec-example.png b/cmd/clef/docs/qubes/qrexec-example.png
index 0d86fde19d8bc302adcfc1d9e1a2bd70c934afe9..fc5d57725dd30f32415e543244c9c40110a31c8e 100644
Binary files a/cmd/clef/docs/qubes/qrexec-example.png and b/cmd/clef/docs/qubes/qrexec-example.png differ
diff --git a/cmd/clef/docs/qubes/qubes_newaccount-1.png b/cmd/clef/docs/qubes/qubes_newaccount-1.png
index 598dbbee7a9916087584b55898f678271d65625e..3bfc8b5b7e91330f1f773d9fef259906eab06c31 100644
Binary files a/cmd/clef/docs/qubes/qubes_newaccount-1.png and b/cmd/clef/docs/qubes/qubes_newaccount-1.png differ
diff --git a/cmd/clef/docs/qubes/qubes_newaccount-2.png b/cmd/clef/docs/qubes/qubes_newaccount-2.png
index cd762a1934a8034fe41d4a6cfd6e47978e298365..c6dbd535ddd3d556db2f173e8772c79d642eefbb 100644
Binary files a/cmd/clef/docs/qubes/qubes_newaccount-2.png and b/cmd/clef/docs/qubes/qubes_newaccount-2.png differ
diff --git a/cmd/clef/sign_flow.png b/cmd/clef/sign_flow.png
index 9c0f3cc5d58b9bd142a920525fd339e2bd0776a5..93ef81a32e8ee72d68f59dbad9db6b03ac7fb782 100644
Binary files a/cmd/clef/sign_flow.png and b/cmd/clef/sign_flow.png differ
diff --git a/cmd/clef/tutorial.md b/cmd/clef/tutorial.md
index ed86f4810eb256214c928b2452dac0ff6540f64a..dfb31ba4efacec5d00efaaba864e63994275515e 100644
--- a/cmd/clef/tutorial.md
+++ b/cmd/clef/tutorial.md
@@ -31,43 +31,51 @@ NOTE: This file does not contain your accounts. Those need to be backed up separ
 
 ## Creating rules
 
-Now, you can create a rule-file.
+Now, you can create a rule-file. Note that it is not mandatory to use predefined rules, but they are very handy.
 
 ```javascript
 function ApproveListing(){
     return "Approve"
 }
 ```
-Get the `sha256` hash....
+
+Get the `sha256` hash of the rule-file. If you have OpenSSL installed, you can run `openssl sha256 rules.js`...
 ```text
 #sha256sum rules.js
 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72  rules.js
 ```
-...And then `attest` the file:
+...now `attest` the file...
 ```text
 #./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
 
 INFO [02-21|12:14:38] Ruleset attestation updated              sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
 ```
-At this point, we then start the signer with the rule-file:
 
+...and (required only for non-production versions) load a mock-up `4byte.json` by copying the file from the source tree to your current working directory:
+```text
+#cp $GOPATH/src/github.com/ethereum/go-ethereum/cmd/clef/4byte.json $PWD
+```
+
+At this point, we can start the signer with the rule-file:
 ```text
-#./signer --rules rules.js
-
-INFO [02-21|12:15:18] Using CLI as UI-channel
-INFO [02-21|12:15:18] Loaded 4byte db                          signatures=5509 file=./4byte.json
-INFO [02-21|12:15:18] Could not load rulefile, rules not enabled file=rulefile
-DEBUG[02-21|12:15:18] FS scan times                            list=35.335µs set=5.536µs diff=5.073µs
-DEBUG[02-21|12:15:18] Ledger support enabled
-DEBUG[02-21|12:15:18] Trezor support enabled
-INFO [02-21|12:15:18] Audit logs configured                    file=audit.log
-INFO [02-21|12:15:18] HTTP endpoint opened                     url=http://localhost:8550
+#./signer --rules rules.js --rpc
+
+INFO [09-25|20:28:11.866] Using CLI as UI-channel 
+INFO [09-25|20:28:11.876] Loaded 4byte db                          signatures=5509 file=./4byte.json
+INFO [09-25|20:28:11.877] Rule engine configured                   file=./rules.js
+DEBUG[09-25|20:28:11.877] FS scan times                            list=100.781µs set=13.253µs diff=5.761µs
+DEBUG[09-25|20:28:11.884] Ledger support enabled 
+DEBUG[09-25|20:28:11.888] Trezor support enabled 
+INFO [09-25|20:28:11.888] Audit logs configured                    file=audit.log
+DEBUG[09-25|20:28:11.888] HTTP registered                          namespace=account
+INFO [09-25|20:28:11.890] HTTP endpoint opened                     url=http://localhost:8550
+DEBUG[09-25|20:28:11.890] IPC registered                           namespace=account
+INFO [09-25|20:28:11.890] IPC endpoint opened                      url=<nil>
 ------- Signer info -------
+* extapi_version : 2.0.0
+* intapi_version : 2.0.0
 * extapi_http : http://localhost:8550
 * extapi_ipc : <nil>
-* extapi_version : 2.0.0
-* intapi_version : 1.2.0
-
 ```
 
 Any list-requests will now be auto-approved by our rule-file.
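+
+For example, a list-request can be issued manually against the external API. The following is a minimal sketch that assumes the `account_list` method and the default HTTP endpoint shown in the startup log above:
+```text
+#curl -H "Content-Type: application/json" -X POST --data '{"id":1,"jsonrpc":"2.0","method":"account_list","params":[]}' http://localhost:8550
+```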
@@ -107,16 +115,16 @@ The `master_seed` was then used to derive a few other things:
 
 ## Adding credentials
 
-In order to make more useful rules; sign transactions, the signer needs access to the passwords needed to unlock keystores.
+In order to make more useful rules, such as signing transactions, the signer needs access to the passwords required to unlock the keystore.
 
 ```text
-#./signer addpw 0x694267f14675d7e1b9494fd8d72fefe1755710fa test
+#./signer addpw "0x694267f14675d7e1b9494fd8d72fefe1755710fa" "test_password"
 
 INFO [02-21|13:43:21] Credential store updated                 key=0x694267f14675d7e1b9494fd8d72fefe1755710fa
 ```
 ## More advanced rules
 
-Now let's update the rules to make use of credentials
+Now let's update the rules to make use of credentials:
 
 ```javascript
 function ApproveListing(){
@@ -134,13 +142,15 @@ function ApproveSignData(r){
 }
 
 ```
-In this example,
-* any requests to sign data with the account `0x694...` will be
-    * auto-approved if the message contains with `bazonk`,
-    * and auto-rejected if it does not.
-    * Any other signing-requests will be passed along for manual approve/reject.
+In this example:
+* Any requests to sign data with the account `0x694...` will be
+    * auto-approved if the message contains `bazonk`,
+    * auto-rejected if it does not.
+* Any other signing-requests will be passed along for manual approve/reject.
 
-..attest the new file
+_Note: make sure that `0x694...` is an account you have access to. You can create it either via clef or via the traditional account CLI tool. If you chose the latter, make sure both clef and geth use the same keystore by specifying `--keystore path/to/your/keystore` when running clef, as shown below._
+
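+For example, a sketch of such an invocation (assuming geth's default keystore location on Linux):
+```text
+#./signer --keystore ~/.ethereum/keystore --rules rules.js --rpc
+```
+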
+Attest the new file...
 ```text
 #sha256sum rules.js
 2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f  rules.js
@@ -155,21 +165,24 @@ And start the signer:
 ```
 #./signer --rules rules.js --rpc
 
-INFO [02-21|14:41:56] Using CLI as UI-channel
-INFO [02-21|14:41:56] Loaded 4byte db                          signatures=5509 file=./4byte.json
-INFO [02-21|14:41:56] Rule engine configured                   file=rules.js
-DEBUG[02-21|14:41:56] FS scan times                            list=34.607µs set=4.509µs diff=4.87µs
-DEBUG[02-21|14:41:56] Ledger support enabled
-DEBUG[02-21|14:41:56] Trezor support enabled
-INFO [02-21|14:41:56] Audit logs configured                    file=audit.log
-INFO [02-21|14:41:56] HTTP endpoint opened                     url=http://localhost:8550
+INFO [09-25|21:02:16.450] Using CLI as UI-channel 
+INFO [09-25|21:02:16.466] Loaded 4byte db                          signatures=5509 file=./4byte.json
+INFO [09-25|21:02:16.467] Rule engine configured                   file=./rules.js
+DEBUG[09-25|21:02:16.468] FS scan times                            list=1.45262ms set=21.926µs diff=6.944µs
+DEBUG[09-25|21:02:16.473] Ledger support enabled 
+DEBUG[09-25|21:02:16.475] Trezor support enabled 
+INFO [09-25|21:02:16.476] Audit logs configured                    file=audit.log
+DEBUG[09-25|21:02:16.476] HTTP registered                          namespace=account
+INFO [09-25|21:02:16.478] HTTP endpoint opened                     url=http://localhost:8550
+DEBUG[09-25|21:02:16.478] IPC registered                           namespace=account
+INFO [09-25|21:02:16.478] IPC endpoint opened                      url=<nil>
 ------- Signer info -------
 * extapi_version : 2.0.0
-* intapi_version : 1.2.0
+* intapi_version : 2.0.0
 * extapi_http : http://localhost:8550
 * extapi_ipc : <nil>
-INFO [02-21|14:41:56] error occurred during execution          error="ReferenceError: 'OnSignerStartup' is not defined"
 ```
+
 And then test signing, once with `bazonk` and once without:
 
 ```
@@ -195,4 +208,4 @@ t=2018-02-21T14:42:41+0100 lvl=info msg=Sign       api=signer type=request  meta
 t=2018-02-21T14:42:42+0100 lvl=info msg=Sign       api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
 t=2018-02-21T14:42:56+0100 lvl=info msg=Sign       api=signer type=request  metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a
 t=2018-02-21T14:42:56+0100 lvl=info msg=Sign       api=signer type=response data=                                                                                                                                   error="Request denied"
-```
+```
\ No newline at end of file
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index 22b92d2149eb269d452625ab8cb0e1ffa223df02..00978ba26c60c7b6969be0ba6c007afb899c5bcb 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -125,7 +125,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
 	//get the account for the provided swarm account
 	prvkey := getAccount(config.BzzAccount, ctx, stack)
 	//set the resolved config path (geth --datadir)
-	config.Path = stack.InstanceDir()
+	config.Path = expandPath(stack.InstanceDir())
 	//finally, initialize the configuration
 	config.Init(prvkey)
 	//configuration phase completed here
@@ -183,7 +183,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
 
 	if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
 		if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" {
-			currentConfig.Path = datadir
+			currentConfig.Path = expandPath(datadir)
 		}
 	}
 
@@ -230,6 +230,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
 		if len(ensAPIs) == 1 && ensAPIs[0] == "" {
 			ensAPIs = nil
 		}
+		for i := range ensAPIs {
+			ensAPIs[i] = expandPath(ensAPIs[i])
+		}
+
 		currentConfig.EnsAPIs = ensAPIs
 	}
 
@@ -272,7 +276,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 	}
 
 	if datadir := os.Getenv(GETH_ENV_DATADIR); datadir != "" {
-		currentConfig.Path = datadir
+		currentConfig.Path = expandPath(datadir)
 	}
 
 	bzzport := os.Getenv(SWARM_ENV_PORT)
diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go
index 9eae2a3f806bfb8d747a0b3ccc730e5d0e24f84b..f76cb1b984029594bc24170743dcfe7085ee1236 100644
--- a/cmd/swarm/upload.go
+++ b/cmd/swarm/upload.go
@@ -138,6 +138,12 @@ func upload(ctx *cli.Context) {
 // 3. cleans the path, e.g. /a/b/../c -> /a/c
 // Note, it has limitations, e.g. ~someuser/tmp will not be expanded
 func expandPath(p string) string {
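+	// values containing ":" or "@" (e.g. ENS API endpoints or URLs) are not filesystem paths; leave them untouched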
+	if i := strings.Index(p, ":"); i > 0 {
+		return p
+	}
+	if i := strings.Index(p, "@"); i > 0 {
+		return p
+	}
 	if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
 		if home := homeDir(); home != "" {
 			p = home + p[1:]
diff --git a/swarm/api/testdata/test0/img/logo.png b/swarm/api/testdata/test0/img/logo.png
index e0fb15ab33a59ae0324983a2cfc6cc3cea59ef61..9557f960535d520687f69a9a65b35e3637828dd9 100644
Binary files a/swarm/api/testdata/test0/img/logo.png and b/swarm/api/testdata/test0/img/logo.png differ
diff --git a/swarm/network/fetcher.go b/swarm/network/fetcher.go
index 5b4b61c7e23fb134c2572fac5c90712a3fa180e8..6aed57e225ec0e31b39bc780be88b7e55f7da60e 100644
--- a/swarm/network/fetcher.go
+++ b/swarm/network/fetcher.go
@@ -32,6 +32,8 @@ var searchTimeout = 1 * time.Second
 // Also used in stream delivery.
 var RequestTimeout = 10 * time.Second
 
+var maxHopCount uint8 = 20 // maximum number of forwarded requests (hops), to make sure requests are not forwarded forever in peer loops
+
 type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)
 
 // Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
@@ -44,7 +46,7 @@ type Fetcher struct {
 	protoRequestFunc RequestFunc     // request function fetcher calls to issue retrieve request for a chunk
 	addr             storage.Address // the address of the chunk to be fetched
 	offerC           chan *enode.ID  // channel of sources (peer node id strings)
-	requestC         chan struct{}
+	requestC         chan uint8      // channel for incoming requests (with the hopCount value in it)
 	skipCheck        bool
 }
 
@@ -53,6 +55,7 @@ type Request struct {
 	Source      *enode.ID       // nodeID of peer to request from (can be nil)
 	SkipCheck   bool            // whether to offer the chunk first or deliver directly
 	peersToSkip *sync.Map       // peers not to request chunk from (only makes sense if source is nil)
+	HopCount    uint8           // number of forwarded requests (hops)
 }
 
 // NewRequest returns a new instance of Request based on chunk address skip check and
@@ -113,7 +116,7 @@ func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
 		addr:             addr,
 		protoRequestFunc: rf,
 		offerC:           make(chan *enode.ID),
-		requestC:         make(chan struct{}),
+		requestC:         make(chan uint8),
 		skipCheck:        skipCheck,
 	}
 }
@@ -136,7 +139,7 @@ func (f *Fetcher) Offer(ctx context.Context, source *enode.ID) {
 }
 
 // Request is called when an upstream peer request the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
-func (f *Fetcher) Request(ctx context.Context) {
+func (f *Fetcher) Request(ctx context.Context, hopCount uint8) {
 	// First we need to have this select to make sure that we return if context is done
 	select {
 	case <-ctx.Done():
@@ -144,10 +147,15 @@ func (f *Fetcher) Request(ctx context.Context) {
 	default:
 	}
 
+	if hopCount >= maxHopCount {
+		log.Debug("fetcher request hop count limit reached", "hops", hopCount)
+		return
+	}
+
 	// This select alone would not guarantee that we return of context is done, it could potentially
 	// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
 	select {
-	case f.requestC <- struct{}{}:
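+	// pass the incremented hop count to the fetcher loop; it will be attached to any forwarded retrieve request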
+	case f.requestC <- hopCount + 1:
 	case <-ctx.Done():
 	}
 }
@@ -161,6 +169,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 		waitC     <-chan time.Time // timer channel
 		sources   []*enode.ID      // known sources, ie. peers that offered the chunk
 		requested bool             // true if the chunk was actually requested
+		hopCount  uint8
 	)
 	gone := make(chan *enode.ID) // channel to signal that a peer we requested from disconnected
 
@@ -183,7 +192,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 			doRequest = requested
 
 		// incoming request
-		case <-f.requestC:
+		case hopCount = <-f.requestC:
 			log.Trace("new request", "request addr", f.addr)
 			// 2) chunk is requested, set requested flag
 			// launch a request iff none been launched yet
@@ -213,7 +222,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 		// need to issue a new request
 		if doRequest {
 			var err error
-			sources, err = f.doRequest(ctx, gone, peers, sources)
+			sources, err = f.doRequest(ctx, gone, peers, sources, hopCount)
 			if err != nil {
 				log.Info("unable to request", "request addr", f.addr, "err", err)
 			}
@@ -251,7 +260,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 // * the peer's address is added to the set of peers to skip
 // * the peer's address is removed from prospective sources, and
 // * a go routine is started that reports on the gone channel if the peer is disconnected (or terminated their streamer)
-func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID) ([]*enode.ID, error) {
+func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
 	var i int
 	var sourceID *enode.ID
 	var quit chan struct{}
@@ -260,6 +269,7 @@ func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSki
 		Addr:        f.addr,
 		SkipCheck:   f.skipCheck,
 		peersToSkip: peersToSkip,
+		HopCount:    hopCount,
 	}
 
 	foundSource := false
diff --git a/swarm/network/fetcher_test.go b/swarm/network/fetcher_test.go
index b2316b0976928e559ca823e86bd1775f3d2de495..3a926f475873155d359d9f6ef0055f9382a8e430 100644
--- a/swarm/network/fetcher_test.go
+++ b/swarm/network/fetcher_test.go
@@ -33,7 +33,7 @@ type mockRequester struct {
 	// requests []Request
 	requestC  chan *Request   // when a request is coming it is pushed to requestC
 	waitTimes []time.Duration // with waitTimes[i] you can define how much to wait on the ith request (optional)
-	ctr       int             //counts the number of requests
+	count     int             //counts the number of requests
 	quitC     chan struct{}
 }
 
@@ -47,9 +47,9 @@ func newMockRequester(waitTimes ...time.Duration) *mockRequester {
 
 func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*enode.ID, chan struct{}, error) {
 	waitTime := time.Duration(0)
-	if m.ctr < len(m.waitTimes) {
-		waitTime = m.waitTimes[m.ctr]
-		m.ctr++
+	if m.count < len(m.waitTimes) {
+		waitTime = m.waitTimes[m.count]
+		m.count++
 	}
 	time.Sleep(waitTime)
 	m.requestC <- request
@@ -83,7 +83,7 @@ func TestFetcherSingleRequest(t *testing.T) {
 	go fetcher.run(ctx, peersToSkip)
 
 	rctx := context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	select {
 	case request := <-requester.requestC:
@@ -100,6 +100,11 @@ func TestFetcherSingleRequest(t *testing.T) {
 			t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
 		}
 
+		// hopCount in the forwarded request should be incremented
+		if request.HopCount != 1 {
+			t.Fatalf("Expected request.HopCount 1 got %v", request.HopCount)
+		}
+
 		// fetch should trigger a request, if it doesn't happen in time, test should fail
 	case <-time.After(200 * time.Millisecond):
 		t.Fatalf("fetch timeout")
@@ -123,7 +128,7 @@ func TestFetcherCancelStopsFetcher(t *testing.T) {
 	rctx, rcancel := context.WithTimeout(ctx, 100*time.Millisecond)
 	defer rcancel()
 	// we call Request with an active context
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
 	select {
@@ -151,7 +156,7 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
 	rcancel()
 
 	// we call Request with a cancelled context
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
 	select {
@@ -162,7 +167,7 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
 
 	// if there is another Request with active context, there should be a request, because the fetcher itself is not cancelled
 	rctx = context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	select {
 	case <-requester.requestC:
@@ -200,7 +205,7 @@ func TestFetcherOfferUsesSource(t *testing.T) {
 
 	// call Request after the Offer
 	rctx = context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	// there should be exactly 1 request coming from fetcher
 	var request *Request
@@ -241,7 +246,7 @@ func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
 
 	// call Request first
 	rctx := context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	// there should be a request coming from fetcher
 	var request *Request
@@ -296,7 +301,7 @@ func TestFetcherRetryOnTimeout(t *testing.T) {
 
 	// call the fetch function with an active context
 	rctx := context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	// after 100ms the first request should be initiated
 	time.Sleep(100 * time.Millisecond)
@@ -338,7 +343,7 @@ func TestFetcherFactory(t *testing.T) {
 
 	fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)
 
-	fetcher.Request(context.Background())
+	fetcher.Request(context.Background(), 0)
 
 	// check if the created fetchFunction really starts a fetcher and initiates a request
 	select {
@@ -368,7 +373,7 @@ func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
 	go fetcher.run(ctx, peersToSkip)
 
 	rctx := context.Background()
-	fetcher.Request(rctx)
+	fetcher.Request(rctx, 0)
 
 	select {
 	case <-requester.requestC:
@@ -457,3 +462,26 @@ func TestRequestSkipPeerPermanent(t *testing.T) {
 		t.Errorf("peer not skipped")
 	}
 }
+
+func TestFetcherMaxHopCount(t *testing.T) {
+	requester := newMockRequester()
+	addr := make([]byte, 32)
+	fetcher := NewFetcher(addr, requester.doRequest, true)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	peersToSkip := &sync.Map{}
+
+	go fetcher.run(ctx, peersToSkip)
+
+	rctx := context.Background()
+	fetcher.Request(rctx, maxHopCount)
+
+	// if hopCount is already at max no request should be initiated
+	select {
+	case <-requester.requestC:
+		t.Fatalf("cancelled fetcher initiated request")
+	case <-time.After(200 * time.Millisecond):
+	}
+}
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index 431136ab1d7914d9f1c9294fd101963df556de55..c2adb1009e58575a38cc2157746adc5c30e5448b 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -128,6 +128,7 @@ func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, err
 type RetrieveRequestMsg struct {
 	Addr      storage.Address
 	SkipCheck bool
+	HopCount  uint8
 }
 
 func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error {
@@ -148,7 +149,9 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
 
 	var cancel func()
 	// TODO: do something with this hardcoded timeout, maybe use TTL in the future
-	ctx, cancel = context.WithTimeout(context.WithValue(ctx, "peer", sp.ID().String()), network.RequestTimeout)
+	ctx = context.WithValue(ctx, "peer", sp.ID().String())
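+	// propagate the request's hop count via the context so the local fetcher can enforce maxHopCount when forwarding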
+	ctx = context.WithValue(ctx, "hopcount", req.HopCount)
+	ctx, cancel = context.WithTimeout(ctx, network.RequestTimeout)
 
 	go func() {
 		select {
@@ -247,6 +250,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
 	err := sp.SendPriority(ctx, &RetrieveRequestMsg{
 		Addr:      req.Addr,
 		SkipCheck: req.SkipCheck,
+		HopCount:  req.HopCount,
 	}, Top)
 	if err != nil {
 		return nil, nil, err
diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go
index 1e47b7cf9a1b379579ff1ab04366dcdcbf16879f..74c785d587bf461763d42c67719369bcf4ee0acb 100644
--- a/swarm/network/stream/messages.go
+++ b/swarm/network/stream/messages.go
@@ -26,7 +26,7 @@ import (
 	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
 	"github.com/ethereum/go-ethereum/swarm/storage"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go"
 )
 
 var syncBatchTimeout = 30 * time.Second
@@ -197,10 +197,16 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
 	if err != nil {
 		return err
 	}
+
 	hashes := req.Hashes
-	want, err := bv.New(len(hashes) / HashSize)
+	lenHashes := len(hashes)
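+	// the hashes blob must contain a whole number of HashSize-long hashes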
+	if lenHashes%HashSize != 0 {
+		return fmt.Errorf("error invalid hashes length (len: %v)", lenHashes)
+	}
+
+	want, err := bv.New(lenHashes / HashSize)
 	if err != nil {
-		return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
+		return fmt.Errorf("error initiaising bitvector of length %v: %v", lenHashes/HashSize, err)
 	}
 
 	ctr := 0
@@ -208,7 +214,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
 	ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
 
 	ctx = context.WithValue(ctx, "source", p.ID().String())
-	for i := 0; i < len(hashes); i += HashSize {
+	for i := 0; i < lenHashes; i += HashSize {
 		hash := hashes[i : i+HashSize]
 
 		if wait := c.NeedData(ctx, hash); wait != nil {
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 3b1b11d3640b51c4781d2052c4e84c0709957fb7..1eda06c6a77197c87cc39c9d2957d89fca1d71da 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -642,7 +642,7 @@ func (c *clientParams) clientCreated() {
 // Spec is the spec of the streamer protocol
 var Spec = &protocols.Spec{
 	Name:       "stream",
-	Version:    6,
+	Version:    7,
 	MaxMsgSize: 10 * 1024 * 1024,
 	Messages: []interface{}{
 		UnsubscribeMsg{},
diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go
index 04366cd399031df91698287bc7af3730745d7b22..0bdebefa7d3c91ee31b2164f475225b1f15c56eb 100644
--- a/swarm/network/stream/streamer_test.go
+++ b/swarm/network/stream/streamer_test.go
@@ -19,6 +19,7 @@ package stream
 import (
 	"bytes"
 	"context"
+	"errors"
 	"strconv"
 	"testing"
 	"time"
@@ -56,11 +57,12 @@ func TestStreamerRequestSubscription(t *testing.T) {
 }
 
 var (
-	hash0     = sha3.Sum256([]byte{0})
-	hash1     = sha3.Sum256([]byte{1})
-	hash2     = sha3.Sum256([]byte{2})
-	hashesTmp = append(hash0[:], hash1[:]...)
-	hashes    = append(hashesTmp, hash2[:]...)
+	hash0         = sha3.Sum256([]byte{0})
+	hash1         = sha3.Sum256([]byte{1})
+	hash2         = sha3.Sum256([]byte{2})
+	hashesTmp     = append(hash0[:], hash1[:]...)
+	hashes        = append(hashesTmp, hash2[:]...)
+	corruptHashes = hashes[:40]
 )
 
 type testClient struct {
@@ -460,6 +462,71 @@ func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
 	}
 }
 
+func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
+	tester, streamer, _, teardown, err := newStreamerTester(t, nil)
+	defer teardown()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stream := NewStream("foo", "", true)
+
+	var tc *testClient
+
+	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
+		tc = newTestClient(t)
+		return tc, nil
+	})
+
+	node := tester.Nodes[0]
+
+	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
+	if err != nil {
+		t.Fatalf("Expected no error, got %v", err)
+	}
+
+	err = tester.TestExchanges(p2ptest.Exchange{
+		Label: "Subscribe message",
+		Expects: []p2ptest.Expect{
+			{
+				Code: 4,
+				Msg: &SubscribeMsg{
+					Stream:   stream,
+					History:  NewRange(5, 8),
+					Priority: Top,
+				},
+				Peer: node.ID(),
+			},
+		},
+	},
+		p2ptest.Exchange{
+			Label: "Corrupt offered hash message",
+			Triggers: []p2ptest.Trigger{
+				{
+					Code: 1,
+					Msg: &OfferedHashesMsg{
+						HandoverProof: &HandoverProof{
+							Handover: &Handover{},
+						},
+						Hashes: corruptHashes,
+						From:   5,
+						To:     8,
+						Stream: stream,
+					},
+					Peer: node.ID(),
+				},
+			},
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
+	if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: tester.Nodes[0].ID(), Error: expectedError}); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
 	tester, streamer, _, teardown, err := newStreamerTester(t, nil)
 	defer teardown()
diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go
index 936132d401e31106f769055ed35ee5fa5b334a1f..7a5a9e4d987a0f6b2a42a685967342a57992ea74 100644
--- a/swarm/storage/mru/testutil.go
+++ b/swarm/storage/mru/testutil.go
@@ -40,7 +40,7 @@ func (t *TestHandler) Close() {
 
 type mockNetFetcher struct{}
 
-func (m *mockNetFetcher) Request(ctx context.Context) {
+func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) {
 }
 func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
 }
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index a3a552232118c585a596c4a40503c4beb4fdab73..16bc48a9a2cd17bd9adee5a533b73e83da429249 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -34,7 +34,7 @@ type (
 )
 
 type NetFetcher interface {
-	Request(ctx context.Context)
+	Request(ctx context.Context, hopCount uint8)
 	Offer(ctx context.Context, source *enode.ID)
 }
 
@@ -263,6 +263,9 @@ func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
 
 	// If there is a source in the context then it is an offer, otherwise a request
 	sourceIF := rctx.Value("source")
+
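+	// the hop count is set on the context by the retrieve request handler; it defaults to 0 for local requests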
+	hopCount, _ := rctx.Value("hopcount").(uint8)
+
 	if sourceIF != nil {
 		var source enode.ID
 		if err := source.UnmarshalText([]byte(sourceIF.(string))); err != nil {
@@ -270,7 +273,7 @@ func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
 		}
 		f.netFetcher.Offer(rctx, &source)
 	} else {
-		f.netFetcher.Request(rctx)
+		f.netFetcher.Request(rctx, hopCount)
 	}
 
 	// wait until either the chunk is delivered or the context is done
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index b734c117bac4d12fcdd0b063216f62628d934ac5..8a09fa5ae0792960c3c35404dfc6409d910f1abd 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -40,6 +40,7 @@ type mockNetFetcher struct {
 	offerCalled     bool
 	quit            <-chan struct{}
 	ctx             context.Context
+	hopCounts       []uint8
 }
 
 func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
@@ -47,7 +48,7 @@ func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
 	m.sources = append(m.sources, source)
 }
 
-func (m *mockNetFetcher) Request(ctx context.Context) {
+func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) {
 	m.requestCalled = true
 	var peers []Address
 	m.peers.Range(func(key interface{}, _ interface{}) bool {
@@ -55,6 +56,7 @@ func (m *mockNetFetcher) Request(ctx context.Context) {
 		return true
 	})
 	m.peersPerRequest = append(m.peersPerRequest, peers)
+	m.hopCounts = append(m.hopCounts, hopCount)
 }
 
 type mockNetFetchFuncFactory struct {
@@ -412,7 +414,8 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
 
 	chunk := GenerateRandomChunk(ch.DefaultSize)
 
-	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+	ctx := context.WithValue(context.Background(), "hopcount", uint8(5))
+	ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
 	defer cancel()
 
 	// We call get for a not available chunk, it will timeout because the chunk is not delivered
@@ -426,6 +429,10 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
 	if !fetcher.requestCalled {
 		t.Fatal("Expected NetFetcher.Request to be called")
 	}
+
+	if fetcher.hopCounts[0] != 5 {
+		t.Fatalf("Expected NetFetcher.Request be called with hopCount 5, got %v", fetcher.hopCounts[0])
+	}
 }
 
 // TestNetStoreGetCallsOffer tests if Get created a request on the NetFetcher for an unavailable chunk