From 996755c4a832afce8629a771cab8879c88c98355 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?= <janos@users.noreply.github.com>
Date: Wed, 10 Apr 2019 16:50:58 +0200
Subject: [PATCH] cmd/swarm, swarm: LocalStore storage integration

---
 cmd/swarm/config.go                           |    6 +-
 cmd/swarm/config_test.go                      |    4 +-
 cmd/swarm/db.go                               |  118 +-
 cmd/swarm/export_test.go                      |  164 ++
 cmd/swarm/flags.go                            |    4 +
 cmd/swarm/testdata/datastore_fixture.go       | 1390 +++++++++++++++++
 swarm/api/config.go                           |   14 +-
 swarm/api/config_test.go                      |    1 -
 swarm/api/http/test_server.go                 |    9 +-
 swarm/api/inspector.go                        |    6 +-
 swarm/api/manifest.go                         |    1 -
 swarm/chunk/chunk.go                          |  105 +-
 swarm/network/stream/common_test.go           |   93 +-
 swarm/network/stream/delivery.go              |   58 +-
 swarm/network/stream/delivery_test.go         |   15 +-
 swarm/network/stream/intervals_test.go        |    5 +-
 .../network/stream/snapshot_retrieval_test.go |    5 +-
 swarm/network/stream/snapshot_sync_test.go    |   29 +-
 swarm/network/stream/stream.go                |   13 +-
 swarm/network/stream/syncer.go                |  124 +-
 swarm/network/stream/syncer_test.go           |   61 +-
 swarm/shed/index.go                           |    8 +-
 swarm/shed/schema.go                          |    2 +-
 swarm/storage/common_test.go                  |   65 +-
 swarm/storage/database.go                     |   82 -
 swarm/storage/feed/handler.go                 |   12 +-
 swarm/storage/feed/handler_test.go            |   13 +-
 swarm/storage/feed/testutil.go                |   15 +-
 swarm/storage/filestore.go                    |   10 +-
 swarm/storage/filestore_test.go               |   60 +-
 swarm/storage/hasherstore.go                  |    7 +-
 swarm/storage/hasherstore_test.go             |    6 +-
 swarm/storage/ldbstore.go                     | 1082 -------------
 swarm/storage/ldbstore_test.go                |  788 ----------
 swarm/storage/localstore.go                   |  251 ---
 swarm/storage/localstore/export.go            |  204 +++
 swarm/storage/localstore/export_test.go       |   80 +
 swarm/storage/localstore/gc_test.go           |   48 +-
 swarm/storage/localstore/index_test.go        |   49 +-
 swarm/storage/localstore/localstore.go        |   70 +-
 swarm/storage/localstore/localstore_test.go   |   55 +-
 swarm/storage/localstore/mode_get.go          |   43 +-
 swarm/storage/localstore/mode_get_test.go     |   67 +-
 swarm/storage/localstore/mode_has.go          |   19 +-
 swarm/storage/localstore/mode_has_test.go     |   13 +-
 swarm/storage/localstore/mode_put.go          |  108 +-
 swarm/storage/localstore/mode_put_test.go     |  116 +-
 swarm/storage/localstore/mode_set.go          |   53 +-
 swarm/storage/localstore/mode_set_test.go     |   35 +-
 .../localstore/retrieval_index_test.go        |   17 +-
 swarm/storage/localstore/schema.go            |   52 +
 swarm/storage/localstore/subscription_pull.go |   75 +-
 .../localstore/subscription_pull_test.go      |  243 ++-
 .../localstore/subscription_push_test.go      |   16 +-
 swarm/storage/localstore_test.go              |  244 ---
 swarm/storage/memstore.go                     |   92 --
 swarm/storage/memstore_test.go                |  158 --
 swarm/storage/netstore.go                     |   45 +-
 swarm/storage/netstore_test.go                |  130 +-
 swarm/storage/schema.go                       |   17 -
 swarm/storage/types.go                        |   44 +-
 swarm/swarm.go                                |   47 +-
 62 files changed, 3092 insertions(+), 3674 deletions(-)
 create mode 100644 cmd/swarm/testdata/datastore_fixture.go
 delete mode 100644 swarm/storage/database.go
 delete mode 100644 swarm/storage/ldbstore.go
 delete mode 100644 swarm/storage/ldbstore_test.go
 delete mode 100644 swarm/storage/localstore.go
 create mode 100644 swarm/storage/localstore/export.go
 create mode 100644 swarm/storage/localstore/export_test.go
 create mode 100644 swarm/storage/localstore/schema.go
 delete mode 100644 swarm/storage/localstore_test.go
 delete mode 100644 swarm/storage/memstore.go
 delete mode 100644 swarm/storage/memstore_test.go
 delete mode 100644 swarm/storage/schema.go

diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index 32cd442a0..e4b333549 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -252,15 +252,15 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
 	}
 
 	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
-		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+		currentConfig.ChunkDbPath = storePath
 	}
 
 	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
-		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+		currentConfig.DbCapacity = storeCapacity
 	}
 
 	if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
-		currentConfig.LocalStoreParams.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
+		currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
 	}
 
 	if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 869edd0f7..484f6dec3 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -447,8 +447,8 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
 		t.Fatal("Expected Sync to be disabled, but is true")
 	}
 
-	if info.LocalStoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity)
 	}
 
 	if info.HiveParams.KeepAliveInterval != 6000000000 {
diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go
index 7916beffc..b0e9f367f 100644
--- a/cmd/swarm/db.go
+++ b/cmd/swarm/db.go
@@ -17,6 +17,10 @@
 package main
 
 import (
+	"archive/tar"
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
 	"fmt"
 	"io"
 	"os"
@@ -25,10 +29,22 @@ import (
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
 	"gopkg.in/urfave/cli.v1"
 )
 
+// legacyKeyIndex and keyData are the key prefixes under which the legacy
+// LevelDB store kept chunk index entries and chunk data, respectively.
+var legacyKeyIndex = byte(0)
+var keyData = byte(6)
+
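+// dpaDBIndex mirrors the RLP-encoded index entry format of the legacy chunk database.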
+type dpaDBIndex struct {
+	Idx    uint64
+	Access uint64
+}
+
 var dbCommand = cli.Command{
 	Name:               "db",
 	CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
 pv(1) tool to get a progress bar:
 
     pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
+			Flags: []cli.Flag{
+				SwarmLegacyFlag,
+			},
 		},
 	},
 }
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}
 
-	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
-	if err != nil {
-		utils.Fatalf("error opening local chunk database: %s", err)
-	}
-	defer store.Close()
-
 	var out io.Writer
 	if args[1] == "-" {
 		out = os.Stdout
@@ -95,6 +108,23 @@ func dbExport(ctx *cli.Context) {
 		out = f
 	}
 
+	// If the database still uses a legacy (pre 'sanctuary') schema, export it
+	// with the legacy exporter instead of opening it as a localstore.
+	isLegacy := localstore.IsLegacyDatabase(args[0])
+	if isLegacy {
+		count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
+		if err != nil {
+			utils.Fatalf("error exporting legacy local chunk database: %s", err)
+		}
+
+		log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
+		return
+	}
+
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
 	count, err := store.Export(out)
 	if err != nil {
 		utils.Fatalf("error exporting local chunk database: %s", err)
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
 		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}
 
+	legacy := ctx.IsSet(SwarmLegacyFlag.Name)
+
 	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,7 +159,7 @@ func dbImport(ctx *cli.Context) {
 		in = f
 	}
 
-	count, err := store.Import(in)
+	count, err := store.Import(in, legacy)
 	if err != nil {
 		utils.Fatalf("error importing local chunk database: %s", err)
 	}
@@ -135,13 +167,73 @@ func dbImport(ctx *cli.Context) {
 	log.Info(fmt.Sprintf("successfully imported %d chunks", count))
 }
 
-func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
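+// openLDBStore opens the local chunk database at path, using basekey as the
+// node's base address.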
+func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {
 	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
 		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
 	}
 
-	storeparams := storage.NewDefaultStoreParams()
-	ldbparams := storage.NewLDBStoreParams(storeparams, path)
-	ldbparams.BaseKey = basekey
-	return storage.NewLDBStore(ldbparams)
+	return localstore.New(path, basekey, nil)
+}
+
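+// decodeIndex RLP-decodes a legacy index entry into index.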
+func decodeIndex(data []byte, index *dpaDBIndex) error {
+	dec := rlp.NewStream(bytes.NewReader(data), 0)
+	return dec.Decode(index)
+}
+
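+// getDataKey composes the legacy data key of a chunk from the keyData prefix,
+// its proximity order and its big-endian encoded index number.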
+func getDataKey(idx uint64, po uint8) []byte {
+	key := make([]byte, 10)
+	key[0] = keyData
+	key[1] = po
+	binary.BigEndian.PutUint64(key[2:], idx)
+
+	return key
+}
+
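+// exportLegacy exports chunks from a legacy LevelDB database at path as a tar
+// stream written to out and returns the number of exported chunks.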
+func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
+	tw := tar.NewWriter(out)
+	defer tw.Close()
+	db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
+	if err != nil {
+		return 0, err
+	}
+	defer db.Close()
+
+	it := db.NewIterator(nil, nil)
+	defer it.Release()
+	var count int64
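+	// Iterate over all index entries (prefix legacyKeyIndex), look up the chunk
+	// data stored under the corresponding data key and write it to the archive.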
+	for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
+		key := it.Key()
+		if key == nil || key[0] != legacyKeyIndex {
+			break
+		}
+
+		var index dpaDBIndex
+
+		hash := key[1:]
+		if err := decodeIndex(it.Value(), &index); err != nil {
+			return count, err
+		}
+
+		po := uint8(chunk.Proximity(basekey, hash))
+
+		datakey := getDataKey(index.Idx, po)
+		data, err := db.Get(datakey, nil)
+		if err != nil {
+			log.Error(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
+			continue
+		}
+
+		hdr := &tar.Header{
+			Name: hex.EncodeToString(hash),
+			Mode: 0644,
+			Size: int64(len(data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return count, err
+		}
+		if _, err := tw.Write(data); err != nil {
+			return count, err
+		}
+		count++
+	}
+
+	return count, nil
 }
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
index e8671eea7..19e54c21d 100644
--- a/cmd/swarm/export_test.go
+++ b/cmd/swarm/export_test.go
@@ -17,19 +17,34 @@
 package main
 
 import (
+	"archive/tar"
 	"bytes"
+	"compress/gzip"
 	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"os"
+	"path"
 	"runtime"
 	"strings"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/cmd/swarm/testdata"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
+const (
+	DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+	DATABASE_FIXTURE_PASSWORD    = "pass"
+	FIXTURE_DATADIR_PREFIX       = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+	FixtureBaseKey               = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
+)
+
 // TestCLISwarmExportImport performs the following test:
 // 1. runs swarm node
 // 2. uploads a random file
@@ -99,6 +114,112 @@ func TestCLISwarmExportImport(t *testing.T) {
 	mustEqualFiles(t, bytes.NewReader(content), res.Body)
 }
 
+// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
+// The test sequence is as follows:
+// 1. unpack database fixture to tmp dir
+// 2. try to open with new swarm binary that should complain about old database
+// 3. export from old database
+// 4. remove the chunks folder
+// 5. import the dump
+// 6. file should be accessible
+func TestExportLegacyToNew(t *testing.T) {
+	/*
+		fixture	bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
+	*/
+	const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
+	const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
+	tmpdir, err := ioutil.TempDir("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
+	defer os.RemoveAll(tmpdir)
+	inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)
+
+	tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
+	defer os.Remove(tmpPassword)
+
+	flags := []string{
+		"--datadir", tmpdir,
+		"--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
+		"--password", tmpPassword,
+	}
+
+	newSwarmOldDb := runSwarm(t, flags...)
+	_, matches := newSwarmOldDb.ExpectRegexp(".+")
+	newSwarmOldDb.ExpectExit()
+
+	if len(matches) == 0 {
+		t.Fatalf("stdout not matched")
+	}
+
+	if newSwarmOldDb.ExitStatus() == 0 {
+		t.Fatal("should error")
+	}
+	t.Log("exporting legacy database")
+	actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
+	exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
+	exportCmd.ExpectExit()
+
+	stat, err := os.Stat(tmpdir + "/export.tar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// sanity check: the exported fixture archive should be at least ~90kB
+	if stat.Size() < 90000 {
+		t.Fatal("export size too small")
+	}
+	t.Log("removing chunk datadir")
+	err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// start second cluster
+	cluster2 := newTestCluster(t, 1)
+	var info2 swarm.Info
+	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+		t.Fatal(err)
+	}
+
+	// stop second cluster, so that we close LevelDB
+	cluster2.Stop()
+	defer cluster2.Cleanup()
+
+	// import the export.tar
+	importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+	importCmd.ExpectExit()
+
+	// spin second cluster back up
+	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+	t.Log("trying to http get the file")
+	// try to fetch imported file
+	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.StatusCode != 200 {
+		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+	}
+	h := md5.New()
+	if _, err := io.Copy(h, res.Body); err != nil {
+		t.Fatal(err)
+	}
+
+	sum := h.Sum(nil)
+
+	b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(sum, b) {
+		t.Fatal("should be equal")
+	}
+}
+
 func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
 	h := md5.New()
 	upLen, err := io.Copy(h, up)
@@ -117,3 +238,46 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
 		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
 	}
 }
+
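+// inflateBase64Gzip decodes a base64-encoded, gzip-compressed tar archive and
+// unpacks its directories and regular files into directory.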
+func inflateBase64Gzip(t *testing.T, base64File, directory string) {
+	t.Helper()
+
+	f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
+	gzf, err := gzip.NewReader(f)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tarReader := tar.NewReader(gzf)
+
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		name := header.Name
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			err := os.Mkdir(path.Join(directory, name), os.ModePerm)
+			if err != nil {
+				t.Fatal(err)
+			}
+		case tar.TypeReg:
+			file, err := os.Create(path.Join(directory, name))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if _, err := io.Copy(file, tarReader); err != nil {
+				t.Fatal(err)
+			}
+			file.Close()
+		default:
+			t.Fatalf("unexpected tar header type %d for entry %s", header.Typeflag, name)
+		}
+	}
+}
diff --git a/cmd/swarm/flags.go b/cmd/swarm/flags.go
index 5e1ada632..6093149e3 100644
--- a/cmd/swarm/flags.go
+++ b/cmd/swarm/flags.go
@@ -182,4 +182,8 @@ var (
 		Usage:  "URL of the Global Store API provider (only for testing)",
 		EnvVar: SwarmGlobalstoreAPI,
 	}
+	SwarmLegacyFlag = cli.BoolFlag{
+		Name:  "legacy",
+		Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')",
+	}
 )
diff --git a/cmd/swarm/testdata/datastore_fixture.go b/cmd/swarm/testdata/datastore_fixture.go
new file mode 100644
index 000000000..6a147a6a4
--- /dev/null
+++ b/cmd/swarm/testdata/datastore_fixture.go
@@ -0,0 +1,1390 @@
+package testdata
+
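+// DATADIR_MIGRATION_FIXTURE is a base64-encoded, gzip-compressed tar archive of
+// a legacy swarm data directory; TestExportLegacyToNew unpacks it with
+// inflateBase64Gzip before running the migration.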
+const DATADIR_MIGRATION_FIXTURE = `H4sIAJSqh1wAA+zbBVQcXbow6saCBZcAwd2hm4bGCe4eIFhIA427SyAhaNDgFtwhQII7wS24BQuu
+AQKEQPC/Pp3vrHvWmZlz78xdd93ZWQ/VsmvXfmtX7ap6V9oW4e3q5uiC4AH96wovLy+Ml5f216UA
+/69LXgj0t+XvhRbMB4WCYRB+XgEILS+YD3gJouX/F/bpz+Lu6gZ3AbpiA3dwdP0f6gHVLCz+h+9/
+j+PP5f9Hiu0f46/zWJqLC8ILFuLi5eMCQx6DIVxQAS4oLzcUJsTHxw/EZMDFxQuHg/mFeCFCFnAw
+n4UFXBBYmglYWFgIIEzNEBZwM4SQkLkAHPpftwGsK/A/jT+Mn+8v4w+m5YXwQ2C8IFref8cO+P/5
++PvSw83NXRCurvTC9P/o6NJz0pu5eDu5OdIL+9KbWTtZIVyAteEIV+C4EeQyc3P5pcKvH7shvNx+
+adgcAbWAAW1C+PnNzfihvKYIfkFBPgic31xQyByGMEMIwvkFwQIWMIQ5FCogAAbeQc0QpkJm5uZC
+AgjeP9tzgrvA7V1/2ay1B9AuxEJASABuBhWCmEIhCFNzU0FzmIAprwA/P1wI2JwAvR8nva25BVDT
+9dcO0//69m+NmNvaIRzohfkgnPTAAiIAAUOhnPRO9MJgTnogJEFOele43W8BwAQsIGAE2MICZiEE
+NhUQhID5gQUYCoMCJ4eQBZQXym8qADMTRJib8cLNITAYUBMCBvOaCQrxw/h/6Yg93AxoCWYuaAaD
+geGCEAteCB+fqaAAHwJqagqBWAhBBXj5EEJQYO/D4IIwM6i5AAIqiIDz8fHCTKFwXqggL4zP7JeW
+rM1/iZ3PHG4KR1hwWQiCTbmgplBBLiFzPjMuoD9gC/Nft2sGxOuBcHG1dvwlSL//dvxdPeEu9v/K
+yR/02/zPz/+Pzf9goCIvGAKF8f1n/v93lN/G38HRHOH6LzsK/vnx5wNOhf+M/7+j/Db+KurSyv+6
+bfxy/YdC/8H7P9iv4w/m4//P9f/fUX4bf1Mfn3/43u6fnyb+6fMfuLhCwf85//8d5W/zP/Ak8C/a
+xt+7/wf/X85/KC8f9D/n/7+jmAsB+10IYiYkBEcIAHtfUFAQbgrcrwvwQYC7WLApAoiaDwyHAXf2
+gjCIgClMAHiUFzIV5Dfnh/MKCoD/3+7/f8r/vfK/mP+BM8ENwfVr1oDb3PQfuBz88/M/lA8G+8/8
+/+8o/4vxN7Nyd7D9Z54W/vnx5/t1/v/P+P/ry/9+/H+7dHPbOVr+vW38j/f/ED5+Pr6/jL8A7Jfz
+/z/5v39Tce5INScFISH9/vaXJRISxj0ndxdrN+8Jnj0qMlQk5D++RPoFCsafKyOj/wIJgAxAAaAC
+0AD3AOgADAAmAAuADbgPwAHgAvAA+AACACGACEAMIAGQAh4AyADkAArAQwAlgApADaAB0ALoAPQA
+BgAjgAnADGABsALYAOwADgAngAvADeAB8ALAAAiADwAF8AMEADCAIEAIIAwQAYgCxADiAAnAI4Ak
+QAogDZAByALkAPIABYAiQAmgDFABqALUAOoADYAmQAugDXgM0AHoAvQATwD6AAOAIcAIYAx4CjAB
+PAPAAaYAM4A5AAGwAFgCrADWABuALcAOYA9wADgCnADOABeAK8AN4A7wAHgCvADeAB+AL+A5wA/g
+D3gBeAkIALwCBAKCAMGAEEAoIAzwGhAOiABEAqIA0YAYwBtALCAOEA9IACQCkgDJgBRAKiANkA54
+C8gAZAKyANmAHEAuIA+QDygAFAKKAMWAEkApoAzwDlAOqABUAt4DPgCqANWAGkAtoA5QD2gANAKa
+AM2AFkAroA3QDvgI6AB0AroA3YAeQC+gD9APGAAMAoYAnwDDgBHAKGAMMA6YAEwCpgDTgBnALOAz
+YA4wD1gALAKWAF8Ay4AVwCpgDbAO2ABsArYA24AdwC5gD/AVsA84ABwCvgGOAMeAE8B3wCngB+AM
+cA74CbgAXAKuANeAG8At4C6Wl5iFHISEgvRf5g5MK7idnaMnAuFQ6FHNNwZCQv39e/Rfvse698ds
+AmKPMFPaYz7s0n6HO7RPO6AuM/NznEZwSi9chdYnqE/tHPxHTV8/JGCG+XPaQaID/b01UTpfvkTC
+xPxjDbY/t/n3VvzL9IaEhIT6l9d/bh/piqENJw+EhP3XsP7y9T/WPSTQ37r3t54i/T/aU2Sb9Y8j
+o/hIhP91AP7cbDEtH8K9Stuqt+iFnI6DmbIBkV1z0qwxl/lGA9VCybJxTCEI/7eqd5/vVkD4SnKK
+cr9eBBSAf6C7VVCkrJc1cE1UVQWxA5UwQGhIhCCUX4+EX64UFCC0348KJaSHf77WQmL9vQ4Qaqg1
+CPX3zw1+75XC7+9/W6JkI/2txWzkP2qjgrJR/niNJPBrDw9AghpWjm6OrlaOTrR83LwgQSlFVVTU
+3xr99TXTry/xp6gi52JAVai7LzFT9n9K+d91gggwkARQQShIwFWEAAmFAOmuG0QDNIyG9Gv5vWMo
+yKhIaKB76BiYWNhAhS584MgHPkNBvYeKfg8V454rcJ1EIUAlpEMDE9Hj35PUhBDDnRnQA2hjpUhy
+tPg+MJrykna4jEvHQV9hrGo/YjJ7wH8U75qrIVMVeMz8WMA8wa3z2URQHpnTGossDFGt456f2BV8
+Minn8XJdXteiBhOLnIKVjV1QSFhBUUnvib6BpZW1jaeXt09IaNjr8DdJySmpadkFhUXFJaXva+vq
+GxqbPnb39Pb1DwyOTU3PzH6em1/Z2Nza3tnd+3764+z858XlL1ECexfpj/LbQQEEgYr2tyiBizMy
+ECcKMgoaMhoyKmooMG4EKPiEdKi0YEkiNF56iCbxo3t8DHApEmctaIAGY2yO9AdSdFN+7aNxpmcd
+Mqsuxw9emQk4Mcs+jmPJnQCaJfs1DhgQiJw8EIqO7i/BmCMsfonH1c3d45eYXgYGBf8WV3xC4m+h
+5eX/HlxVdc3v8XV2/RHh5B8hrq3/HuTJn1EugKRBaCioqKgoaMAfNDQguPsY6EDBw8bGvE+AR0hI
+gEdAQERK+YCImIKEgICMjoyCioqGhoboAT0jPTUjJTUN9S+NIAGrot9Dx8XAwKUmJiCm/qfL3Tzo
+Psqvu48A2M/Hi3ZN6A81nE+REa04xlmrvA22X+D1hLgTrapCwqfbRYwn+y9PajJaaeSgLheZjRJf
+L3Dbk7PV4F6H39JdwW4d3xUrmKojC3IU87IDtWqZFAbHEwvsqx+SW23yGz7Y731V/a46KGBSpXDY
+OMKIwHTgNgxtc5teJygwqvdVSzEkaUNDwYeQll9Ubq93S1u0mV/KSJmNmXBe1LaYWXc4jqEqkAVG
+4uTG9lgDTppbN1ie+2T2CXOckFLCQfjekLDqieqBzmE4gxrxRjZhvgtbVLCIqcDn8UJ2Frhp5LAK
+IYZ9kVm+9ceDAkN79jkDjZ9T9qeZzCnTfETKHsNrCQd5zXcgiVNxtRA7zhSGhc51KooU3DBSIy1C
+mfdIJMxRQsqstD9Gi6QfvM1I+G6rWqHLarMq3a3CoJLz6I25ItZEgTkJtvWCtpoYjsrHK0vbp7uZ
+hIaNNdU5tbveHooGW87s5hoaGTMtwwPkDk6pa8YKLTdi7E0kh2521AEfiXW6Xz+2GOns0BCUE16f
+hvWNcvjBbJOTWco3hHxQNTvBpZw/z5tgpw/CTt4WxL9TVIGMrblhJpSoBBYyF2rlCnx+Pc5oT6RZ
+F7RfILjurB1REkQuZMVZXDxvEcz8uEQ1DWUzjWXn2riuXJg1Oq1Ruwaunuc9mn/8A2JmPkA9xA0e
+zwtIwgiWRpUklILUsnDIphih2O1jvN6em0B9ZmOU1jjxVa2n93FcItY0+8S7V9mRhJr4OrJ5rqWn
+B1k1274cQxxZ8qEpnp4i69vIbfAtH/4PNk+mG148kRq7YNQL6X6MqFES0mgYJ5QTEQN5BLEfPEpB
++1yja1HAWrEcIPD688JFXkb17k/tmCyLwljmaK1EOlLGvIBFEDSYNclMMQkSITP5XtiMgI+w00yS
+iC3FOtJ6nF5rwhwhlExW63IFui/04Smkr7TYIf0Qnz/gULiU/PKLs0y+TdTZvGWRTQaRssv0YN08
+mnjGlnhJ3+TT3q6k0ac9H1vRbPxNPl+fPxeCvn1+gxxc8MptWO4tNm/Dvbn2wcXL/OI0MdkuLK+6
+kdjsLt66MUR/EttIwl7ndHP/q+nXz24zhi0cP887jdRv79jOMcC8hM+sywdZH3K4Y2487Kt5QMh6
+9nIdyfiCnRvkKuzjVBksL754WITkH6jB+nBaQ/VpWZZG5XbGY2GuURkfsmxKYaKT5Mh6dMzvGNgv
+kd8WWGZrwasKXVIV0KNqZawDZHaeFc8F7XwSmCdiS0tktqpIE+058GEfSU4nfMZ/5hU1POf6lMZX
+IEXMsP9xFHom/6Hi0lBnfCukPdG+FRcHXGrBz9Vlye6h2YFeeYH+ztmt8Y3Qxo7OHUiIsxXG/oS+
+QYDWOo0Kp8sbOmneoiF688IkBToanxRZnd8lXn0a9eGktNLwuw5/r5PRD0y4KRQpnzopNts/+Lml
+VcE4vjE+5Tflvu+HqcXsezsoL8cLcNgh1oay9qx0ijKJXyt2fLAZVEauf+yyXpyvgnR9GhXmJSEL
+P0qFgt1PFXWZlxR+YNQWTRuhYTopj/iccl0FCHdjtgbyw986fvCQ9P8UQMppDE9WGf6wreVkM04B
+JcrjzSB7nbBWOpCWyKCpNINUYzPOgUBTY5EybYnEyqHg6B/UiQVd+C8snNTNF9kxCTpsEI98rJwR
+zZpoaLLL0MpK9pyvyl8yOqIeKDRqaFrl76vfN3wYG/32vtCNC5oFqtWuoZIli0FvLQ8Xq4LkvJBF
+QfZFT7MRGMTMrIjCrFnqhzd8w8mRV3wQQc3XtOY8HwrT3rN0v3mqoy/M/NzemFRa2kxtK6seK+Wx
+3ZrUeIVfluKI+5MutK6h9ExH8wEf0cu6icpcre5HC9Vrvu8+vroF3984yjfKMh5wQ5JTiy4rNXyW
+Edjx41roO6fvrQFxUiTXh53NxohMs/Guxq8/h6LlTg3a2Ioc6T9vWM7W2Ou6SWOOo7LLy+y+aITb
+Z5k05EDORRJ3Tlw7H8fE5b8Gg6koGPIYycNeDqopNCSeVviZwIIfJmdPJPiFtzOrtepb9zs0amy0
+kqZ9x8necx0tV+efOckQdG3XuS77oZf3UTIZkkfweAkxBbGnSfDxiThUKzM1vcj4OegaARvVYN63
+S7F7K6zAHSKWaKId94Er8F0M2cRIvad66YVJZW6rq0/998Z28R8SPFv765tE7UpX4hxVj9so5rbj
+g4e5vyKGtVpqK1XMF61TZsUnK9d8IuXl1KxHvIRKHsnLP9C2156hzWFl+/rzcKeIz0rbjPP1vhC3
+SeTpKPSbXL/GyFB12XO9Wm/NRtu6H43kC/NuDJ+rlDBiOz1GlTzCS2QadFXeFG+EjoeaR5HtiZvk
+bQtD8DEyDyUHarAnXW4kmA91+b87CrGhaQRpZuNjPtMitw7iGKGULVhi0dnz9td3VbRPWrwD5Xh/
+M6A4zLm8qF+5xA5VzwjbrBe/idHfkFS5Wuby055MxJuW4L6R3T22F5qUGzMIKk6nl7xYaNlsquRo
+reWrE23V8ZvNOsevEcHpq1TtEoFPxuDSo3yI+Xxs/MVKd4nJppII7+I2RL3QOyYHUSRjU0/5qU05
+fwvaKBQ2nrx7azJV6i7mST6yGpPnfKi/I/RqVp9Fh/4ZFbSi/PB6KVR9wG1xXkhbikIqiG7T+lDo
+piMcOUTdvsutSCSHIOteDMeSntxoalPh/nDYif7MQfyAz77glnXI0DJfxtuvacTxFzc7CegqlI/4
+cvLJAleKy8V4NPVmR8dp0banE6laxXdY75+fEKKW8mj3u7fMi2iRDIyzJQTGsxXPI4WNQ3O10gof
+cr1szoSN+9HAOg5sDh4ocexxBxQ3W9yBcLgG9PQGJicCgw7OehQMrNzrlfe3Vm5zLup3qY6YI2UG
+aV+pKHCoVeJsF1hTyxfWRX8FT5hotZeh8Ikj7JYE4NvTniRt0Tw+I/JlHdG3JXCb0qXqSUnPa5oc
+CyO2pW9iQxMeb984sEEyQ0zkR0yK9xqfFkUaNUFcZoNckk49pW2eUQnz29bf29nc8Ym2zP1+SZXJ
+edFDJP/DsUjok9t1Vll3PU5gE/l32tIz8NPt0d0P5X5PKi4b+Y29n6UYoDSX+SVleLHEfvxBl2c9
+mZULLfQgKe+jmXS51HmzVVQWmC0r5JL1IHFHdeT07ZVDpv87lUULe0Tg4nJWaaOJhZtTBI4zonlE
+yDjUbMgtNXSvAjFaOwsb1/7w/LC0h8nc3DooH7JAgl1O5BDu25h1iffwswRPSIMI8nR9jSf7WaV3
+s4Px7p7b2ffKsedbc9yfWjwdLpKT9gw9Xb6byP60VE/KDFnovTiVUC56ZU+j+IVLFS+HraU5TO9r
+70xgcuFiy9eok52PmRIRRVNo2z5U1dz6veEnjZVy+/372DwqnSEDyiqfoxkKtrM2222texvWxIoy
+zzK9QQLVfpMF/OWypcbqb88oi+RF3UVFMiC1vsNt63sDI3IrHwrt57pmT2/1xk/cfV4ofrykiQ+l
+kRw7WXCfdBXDwSsf/BTVRP2WDbIYfrDJoziUndpUZGx8X21BbrDSNEPGiOGFREkJCj6Pdxz/8gPH
+r450w27PqvS2K8bOI+ZKqJp8rkOkS/mAGw2JPchyuGbg7Db7l4AvVDzpOXVhXUUI8zRw9qm6KEOp
+WLJkyC2Nob3mbO8JxTyBobfbrcE25WS0Q8SN3JiErJLAkrdgY4GenFif3NId6F6b+4CPunBejMvU
+ZZO9TTwbSlCfMCXBIkFwsjpXHgwnw3aHa+fiDG5l+6m2qMxP1ZjJ/oSKbc/HZ1MsGeY8ud2eLK86
+mhCeskfFOr3PmXKxGYwIUR9srSl/voAFkn21d4nmE6mfeTzI7uWHCnl4OwqNKrfXLOmw+hyQITVq
+bPSjmePBPvXXJPi8i3VBwFwsVTW0T14ksCbBlybf3/T8qT2bl62lEXWo4PxM8sRK+Q7oKO1larsj
+6sLYZ4vz76N16/siGdp1HnJ4T9EQJF2RIt4+h5/FREVNfSzSLGWe9kUTpcvJqHKVdc/Gz1rozhh/
+4fd+W5M8fqGs9GagIaflYGXm+bAJTD27ppFFvOUFbPGirszuMHRS2yEgVHJVyBa9UsKEJP8lW5GD
+Iy/f5m78lgOf7x6D8By7cY13aZuhL4l4u+aYNwtpd6nNmE4Rprs7DjftNLcrg3t3Wl/td4dzO3uN
+uaTUJiLmsDXvUQaq5kP5yYb7wrYUQnjyFDxCQ0anB8pDzbSJJC/zP89C3X88dQnMoc+w72bBVLm3
+gz+2Ql76INSY/dXF4uGb+xSzFd+rt/wI60cndqc0IWqpc1gdudUIOm7mL42Fi9/2XKwZ778fFCx9
+8KZQ16EoZqJod+dByKeyioeX/iLYAoNIyOigvyY4/l7eAKUTCflvWQ1ktj9TDn9vxb9kNZD/ktVA
+/ktWA6VgVattFB+J/L9mNf54TEcupt0h3GP+ilJCqHmM56Dq+/m5ZBlRs79Dx1VgHh/zXMezj39k
+NUi+iT0NIb1nbdsuITE2k5VcohVV+Ox90YukGVVHIc76vVR+v0oR/6z0/XDd+lMfFffvMzfqPLlR
+iWGvy2dkdYuFaXmXYMnA4/XhpbfiJ7eWOfJ+X1FhwQ7jqRTjZaxaR6XvVk6aJeGhuPal3HC15cp1
+aZA5La3H0v364XT3Jh5+Z/XnUeMTwvBtj4l7HX3qEoI15OGXuA7fBFTpGZ5jRMZhLx5nfK/kMTQz
+yANVGljoRfp8UC4dXy0PJOHvU51YGmh9dOsrtbhYrI2Sh/rqnm8FUaOx0IxuLTwKw+1z7Ddhfoft
+ZNM3ytP1H/oK3yKkdh/SiobdKA6F1v0wWBJqtHQTuPjBV9K+VZOO6HPMxHBaYFZQIxFjEC7SwPgE
+Gvp2fV77XSwd3d4nqGHvy3ns/ndnfViu4JaljrFfRN9IcW2wu/8inLZEUfMb+z7ZdEBnUf8AuTfs
+q7xZp3fG02RxnsnL69ZRvOkf+MeGEobH1z7bUhdXZWSpYrdKlULneDE5PLH3k+spJbwG+lzFuz4i
+poGHhStTPhntuLo2RPnuwVvJUOw9I1ob4mZEZt4s9qx4RC5w/N64qsG5W5lIAnkkahJnF+sugg5G
+Lpamgtfs9TMC8yI9arsrc4Zwl6kfDvhFBKIn7l3cFhVxw1M/6nOZzLraYynqtNIPb1phFzh0Nrq8
+RY1UZrUFp9bDqumdFrcSin0vVM/GMvge+scnfvUWE35EFW+RU6CSGbgIfm+kNb3FsV3jnzzQsEEO
+91xpyZBVYcNmFPy8A1PVfRMi29NuPeTWQt3VNvTqAMLTbfpte+z4NgmmzkTzbqTxSPeGrHqifsTP
+5mPWPT1FnfGRIDGrtIX7atCP5YkZj1ero42v1Pn2GvypkjZzW9WsfUp9vnFIfR05128xTGXvcZjN
+Cy//GkF10TS3ny4hgvL4h8CQmQ1sa724VUjqBHe0Uhtr24FYjzzx0XxX7APhdMuWdTvPU44X1Ht1
+GztWYsKb3y8ui3bbRUfzejOFHKPrjZRc8znR4AJwATFaM0f1QoWGwh2PzR2/9M2JoUOjrLRc8YFP
+TiODmeNluG/TwCkbu5MEMmoBpribAQ8NdGONLY4rxF/qapfVpcft9uhGFD8XLPhBHZXZhph05wZP
+JIx6CXKkCxbZNSY9yKhM4eHbB2+TUFRWXEyTCb+PLeTXWWVTJGbCfxTbpk6E3dMvNSqhxF3KedGc
+POPyYE84rzrK68sWR5EgO4fR7iyvQ2yXJWf2ejNC2Ctc5FCvjBT2mEpcjMQixPDZXm9eznfhvYv0
+eepIHbbMd9+etFZtHlSIwATPWtyNVhkkF3bD9D3EHSd7dv1YQkQX0ncetKG7hc6zjBxi/Sgt/fB8
+kOQ9F3EGL3c7u2wMQQxiYUS3+Z2b7rSAoXOStHsliW09sdOhTcPudz/8iRXXpCt7Z6KZ6NwtRK/I
+x3dS2VzUkx7OJfM0iGwRauWcWergsjPD1a7v59CyCpX9mmBLYkyPGgu1hHo9RGkU4UBphosvpwO3
++XHd64UjJs17LMVed6CC9YZ3c1OrZd/4k/bfk0vyEeS+S32VAHaMRSY+tfjQYB9fDk++p6E7KMmK
+MZE/ov5MrsLbJ/IgL9O/dk7HLdtNGblmK6kCGYN0qap2SEDaTaETXTmCWvbQp83+Q4OWOGLPuzSI
+wbyu1vTrVmu4j8N64uGG+YeG/cGR29zlT2cUVHj3aqOoddyOjdhbbUaE0FUuGzB2U+t7BiFzBnN6
+9QOXvv7ESn4hCx3L2F8Wf7ru+MhjGBrVfa+jThly/pj+njSdNzOmcPf7daHfSO9Qx5eWiiP/qhQm
+efmkCVOPrTh+MGnMw/atxVGNVVlpUqVlhgbs2qB0BEnpzgWMyuibUN4sd9jrN4iPnB0WmdE7T/3X
+px1xNmFLn7CqJuCK7O/G8FKFK2wrHNJOxRzEKDnODqp2W7cSjVwS4LsJufJgiAURGN9Q/pGbzU2A
+G17ZDHuw1KniY7ZplASLjbct8ybTn82qA+thMK3maKLq4MGuaLmxXNB8RzXWsX5haZpxgarIEEu+
+xqzxYxxhf7yx4UqJiLLqDWHwJJkeTNTmp2eFcPxMK5shzadzHZv5Ln8HsxivyeahaiZI4onHbIYy
+ZWqHIY2MCAyj0ILC1Xyby6pCv1ErT3p/UZlat9aLHcdPXaXtaAFl3Y+MBteghp/0u3hI/f1Wf0PG
+Utjmw/dpAofzEptzC1EyFwb1FQ9M+MdtpqKcBj56n9qpyzUsS9B47rExBDa1qRgKy4AaVFSKZbBl
+GCG5vMnDp7zqtN+zg5VQ56c3Trl6medK8VtdlF3qOZ2VZGrpHpeoEQbJJD7L5bCbxKKSKVFl4VQh
+yNVIyDZIaLZYJb/82Oaf6/78hRz3+2x62dgBbMIxqRlKqT4Ic3k5wvAtfwLveyd+AeutpkcPB/r2
+qD3bOOPCxo0KFuEf9sOj1TUOORZ2I9R8s7OXE4RZ/K0WdMd59T9aau23kryZiTTaNt8ZfivoUETJ
+MbDJJmNcP8DczZUlLvVBS0ikShu73Cypb1KgOrlIRM8uq2pBxOFrfM60Tg9pAz6J2pXQ048pFPYX
+Q5Qe51pHS51ItFYpxfi73SiaK1i5OWCSAl5MRhes9z84WLC8DtsTOYahrIvwIe91XFPnSimBN3OK
+r3XfpsAGqecqmdy5aVl3OSyDIMp15kqxjMohvXrvUxaX+OxVTlrRrLjxclx0IAVk7AYKqZSvbSlK
+NPEl6ZhQzaUwXBYkKbHxZDcybEgj1dVsPH96FoXt/RypFCO3Ws4kTUUTfhaVoxFfLxKoZClDpExK
+XlOOpbXVmTuuNFQvPhxyIlkkWS7sZObuqvZuhDDTRFKTrfQkyoc2Ka5e5EEUlfDpKHX71sLbFPyn
+MvLojqL4lq8vKdLb/IQd0rNaFCVF/JC1c/niGZXelI7HMTGbIpTMNXNN83eCID7ToZjGY98domJ5
+oVGElrZSr8JJU+PBOfE1qPR4MtqFFSWqGfz3YapUb5qusI6tUhAZ2Q+9lS58+sD3xcBDes/n2Zkx
+5umT48NSxpg2BrDCegcVkvW4VEg/WfcPyURJD+xEXoQqFdva2ppIeivObic2v7SOw+AP85Xm1tLB
+pO3/WI01Tjrww+8eQZX1kx/UKAHi9/LmtUypQQbMHR/T0aXv5kEYSEggJDTknrWnTGtHXqQMPG+K
+BrQ3Hgxoq02yqSyYf6j9rJDg/zotK1f5oYbVAp3yYk40eNke2rwmNj8nRwZ/LUunSD8Hg+H3cDpJ
+p+7TJCd/UWpbOWtNvVIaaiDR0lTkaq3x6/EF34HCu64KazZSSOLYN87gN5+WSjd1UK17cJ5+kmgI
+PBfbuplSUKyrKnl9xbHii/fluMEy4Co5QsVlbtC8hRGZsGoioehQ76LbyTL0MkD0eXPfVfMXIv/t
+E+ixp0fnKsdCEY5HMiaCHc/hDvTwQM/bKDx3s8uShY10sYBe9Ym/iS+zSs0o2Rhwd1+TBD9VFGIa
+7hYOIqLdMvQ/ZjDwyGNQ4K8OPKfK283IJL0DNTac+SpXLtbdgR4Enht/ZYy7jWB7BSUjoqjGIlF1
+UVhZd74DRVaNLi7GFQwgzGvYaFlajV9kyuP9IDRJdvN90PZpuYmm2Yq2su3d+7DO5qUrjxfrivwu
+ORwqxWzOA1LkK6Nu4e22H1ZeOGVgN90M1dx0pS+IcvsVxH4jLRBdPbgZUj8WlHCdVyUdgvQnMcUq
+7VcuR59lrR35f2uoa0j01OS6z5zuRaxMSdTRbSSxjjf6fuok4aOSsa/ejTQNuvoPDH/jyf0vV4S3
+uxKNToURtxbfotp/DpLIkeS+1EgkSyEdveeLTvjaQCx58w6ElIYbQaIiv3OQOK3b7qBSFQ2lK7TV
+o2rRWp7cbU5kFOGyW9MkmJGPPFQopBP4TlpmLS3yhNHeucbw1XApRZDSE9nqzrlVu4mGCSz7TIHR
+h5aOxISqhETEnh67DdEcT+gjvKu62figwVNcdFuhEt/qumauP5bG2ucY4Mcp3oE0o8dxTyey/LRl
+kpq/mt+cVp8T7S/775ks5Z3jCm9fDy8EXiMC5Y1JkePy35jpaoZ4ncWcl6wu3CJ/6Ok9uAP10pwv
+cOiM1ByER2Nja1QJfGZw6XBiOfeeC0A62Z/XdqDWkijyGvlka/Oqgj1sSt2taoLsm2hplHFuvdEh
+bvDVKXGWS91rWSs8HwOxxV6KXGcrKIKVJx7Nm9X/oFgjkk2mSp/zRXGgghUu5qsvXwrUkN00zD31
+s7OcGrbSS5IuwjcrQ78oSziL8oSOKe7L33KXU8YmUFhf96wHXlu+hx5AIHJwdhxV4WLBdp9MqtCf
+Rb4Tsr2TVxt97GkB0etWSmiVvinfExZ6uzVDQAeu36iP51SOlIaJa8HF7vylaY87M9VZ4gc2xtke
+YSHlmr4qQCNeIs4DqwSWSWUaR6B6sJvPNgF3HsUoClbK2NHkXPvwlybSgvX4bKyJxLaWNS0sdyBK
+vKuJ1qyWrAay1Re3nLlxKfLHVrkj+mdCpI1rj7VegD07aY6jd1eS2UFG7CNM4P2GO9CvHc3SRxDG
+H55a19djRSuJDtjOrA2uJ8R4qzGv6jQMyoQzocG3qB1haakGqUwlcavTaHmf/e31nAv8bx6en0q8
+wFtebi+4uqRpz7k6VV829b8gOm8wae0cnuL5Andi49jPQjwXyVyPuXwJbXbT6bEnh3PmC5l5SkVL
+Dbi7ODUBvVZ63K3ctTSTcx66SClr7Z84YGNfhLukeO6X9AWMsrzjqMAdefO13fSXvq9RHg9wHukw
+OZ409G/jmtG+DwKnd6y8UWXld+92fTzVXqTS+zpRHbI3C1W0aa27f6h7qruT5KlIvujwGpbMbpPp
+cqKplTl8yzi+L/HpsL1XHn17o9JaRWJ4i8qNs52res7qTK+rlvwwk/ZFMYr0hzwsJUKHZ7jGnXM4
+vhml8a4upI8Llct0KxfX+w06KeBhvOjLlL5F8zQunadSEUMjJRqkBTKfbBAcbrHjDuEHeYvJyn5t
+OtxbWU42miSdQQt+S836z4IZ5bBzFDZfTWPINneI7qclzvMgzORfwJhN/BLTh0JKtrQZEyazna8r
+v0Odr5cz1BF3IGKrM/KzjOWfYy/ir/ZqXJxNaPgOjjTOXrCtFIpFVUfEzJlUz0dJZvZTPQnJr77+
+qUPBrUS1tDGJ581UsWkjawyUF2+p4nvdUBOuVJzN0zaeqiTpmzc1hT6d5kxCpqJHYHg9eJXZA4o7
+CfiiiF8XWzBdRiKa+dimMZhL7yiktUPNsDkY6kvgy3729PhqrI/UL6b/DsSghz/+KP9K//5M/qzp
+HYj0ly5GtEn0uWqXZukIvxaR7WtSXL3KjrWhaWK6SqM4iKZSIRmLU6u0Y4Hhj5sVZrxye4IIdQjJ
+aONNXdGIMru4PO/gBElMMbSVUy+ok/pVnJpDkuSX1Gk4IwampzsguY/TE06YZUvU1M2NupEr30z7
+MRhPISGjI/0tw0AH+nt5A5ROZJS/ZTVQ2P5MOfy9Ff+S1UD5S1YD5S9ZDdTKpAzwKD4S7X//n2VQ
+imljXAmKGswkeGzCmMMuf5wF5YyRlXdfmlbkCORJHFWuU/+R1SBQCSqCv7bg1AbRdyMPBljZ10P7
+j9YHvSKSLD7HH5ysHx0iqy//XJrhimVI5uzYRFXcreqjBbcZJeEsNJDe62q29nxSdzBU1/CMczKe
+Coy7QkEoF3Dq1+blEy39sqHlfRaThCcnQs1oUUNSWm1i3CqEj16S0tzFHPxC058o6XFkEb3loDle
+Ep+z6QBDG0NocWKaTAxlFbf2Wb5fXWS7JyefTBpiPWjjND5a6kv+zb53L3m03XtQvNpSlwsfOTpl
+GDSlV7ZrZqVTQMgSiae601WoyuFoHbqS8GHUy/n5s4WBrVQzAr8v5J2M4GGZV32H69JhPqLfQFV9
+F5CTOYwB2Wh5L9cfjP6kVDKjefFHyTRv1iU/G3fQJQfx3qdVzCm4tD5Eliirn05IobZfRMrN2s03
+NY/Uui98sRb9kYiBwWHuQm37UFxZ8FZFtJGWbvVckVqbQ+A15J5ZKKeMKYizQOtkSuBoUzwa2SEe
+gpAkv4+NFbs5RABj9GBICHe+OBkt7adrJDWu2RpumDJ61eA0eqCIu7b6sMByfDSuhXaWs6BF5F5c
+Bn8jgyPGA8JoDOddRVG+o5dRn5hqFsTS3YJenryN9esqOHieU+3AoFlHtJhAOzSd0OgRZ3K/5Wjp
+zM2P/BPMBE8zbG01MS6a3O+TnSIh+CgZx2FziJ1b/5CTgDH/s3PU4j2NBLdRXPvOEE26oDO+pLWk
+8SFZ+s/dPdzzOIZRNRlMOUmf5zzJwaucpojVOS4USshQt1EKImYkv2Fdsy5+NaGVkG2XDw/tK+Sj
+18k8YUBRd5/zG/9HQ1/oGC4l7EVKGDSWioYmaRpHsTZP0qljZGXi+l9RZmqvBtX7iMlsYiyiJTu9
+maqj9Ncy7iAqlKGRkxWnVCwxtjHtbtVJeUl2qzAhshRcSD+MKsdbLDvIE9QdWClL9knT5SYuf0D4
+6Yj2+cOJ8ZFEx54X3ObJldtMu3QOnWG3Dk+6rlhp0tnzj4vUf37JfZomJyPY7XFTMfzu2CE+mbzX
+qk1fnqb2xq0q3ue+kMBhBXUWTbMb99uUIDsFL3O6HbfdD28bUZ/an5peORcJCHb7lRy9OcDtb9hP
+32eOTWLadb5pW3XdOtup3rOEOY/9tIuW5owb2NYfAXNuN4fURr+iFOMKLcmdNI4LY0hA2VmLZORW
+k8S3Ydo129u6tlSLL1yEKRJuJpS/zv0hN1wnJl1Dg9lrAK+73aZ+7nURJHTb0iqvKfr5+kMSdwiM
+7GaTnHcjcsSk5g70tppKIoD+iqDsC03Y6yTD69SLCfSETCKW44fqIfBkbjrN/awQwQ42Wrm3k5Gr
+BuuoJ0T+rY34FOVm+Tih5EWcDfaEXmwoClVRuITYBwvRebjkfVcGbu8ZaDh3ICmnnNJpeow72I2r
+2b1ubMN8cbrRgZwFdZn5B0mTNJTg2NOIsXVwX9NT5VhWP3JUrFVxqjYpaSr7NoKuAemDdOfTlQv5
+z5RVSwu0fSEyGWaFDDvdVdtVQ5siKbeovfOFQ/enO5QJBUJcBTaSdg2/pZUridzLkNkMLSIQL0de
+7VVZnpZ3/IH46Rb2Y5KzLtSZtf/5gwhovwkCOpMw5HTUJXgNWU1SuANZNnzVPZB7c7Jsb32IP/go
+GrpgHWVN09jtP8+tH+E6jdv1mSB/Q2EzJpnSVl2VYVtl/P00LSmXpnVhM4cWfky+W0wGuj1/NoW9
+X9Mz/yOjCvt7RQq7NMLakHXK6PHNmq/4o7uKVJuYYzcq/k2ovGFfhoc3Wad2RHiMexZNfn2IQSZA
+kkCqW/AUeTtXgFUolC/l87rt9A4Eo5Y0qsPOOmaM64S+9jGxFyGXS/F2Qf2wGv58FzWn6IrLVoLi
+auxNtmYX7frJ5klAb0pLNf3PiaWlcfVxYfenkaIso55n2jQa3Ycr+ZpTx/lpmwPUVO8KaoNeixPl
+U8goqx5N1n7uN/zQMembqe/GmIarwnwvgnYNV/KcvzeAOf0SlF8/VyU/+X53+pL3KItr2fHMregY
+Rz2KKecaR7BJYVDgidsYcjyBPYNxlzSLkye6k0ze84bvV/5kP0Knh5IXE/OjsIw/qXO2BWUstv1k
+Djx0edXN20dxNAufLD11/gmP3EgUei9P6XVAE/qEgk1JHre3HIyJSReqzRqupnXoSTrkoyXKVHEi
+aDo6EnA/dhl2T8JTYEFF2KCCMuXRoIwoOBxZ+A2yT3u/B5Vw9SpatDQKXEglhjXe51vO4y8ply9c
+013NyEYpDMwPlW8y8NyJL+SDLjySbUz1FaSPxa/JHgcHyaXiklVzRhAeFmqbfTFzrH5gLq1qrQo0
+98EtLI8gNvs2nuHSKKwj/sSv8VZtwF9xYvzNJRUbpnUyUnttTLKVhcdkPGd3GMYAZG2gx/4OJEyw
+CTxAIjWPNYJTw/F2zf1MxrOGQyQcSHKO5pYkh9oIF3iHO+77T19WUzZ+cunG6E2Ri333DLF6I5o1
+STOcKVbYFoS7kvrSo9El8nnyD4KyF9roAgojJeegkxVgYv8BuSLUVyRyTBAy49nPQ93cRG5w+Qmf
+HHlyLni92xPhuhVCtNr3Ru1hxlnodIpMAVINHqeZeh4XXdymun5Lvxz1E2Ra1JacCqqWBda5qrce
+vcQvo+QwrSe9Z2EJ56vc8PPQUlJBeDOJA67KcYJ1pBd8Uu5Q6ksyej2w5be9bzVLF39o9pmY0lpH
+gpIXVpH2MKMh0kEpvSIpg3NBa9V2cgmcYgrvXskY0jQ/CAKxdGEsvrSMcGS77yxzwT0Drgl1XHDA
+yLhulfHfQs5GvtewguGB7iICHt0XmJwMz2y8UktXjhjK6ndpVVlnPeojzudkIegNxWVTTip7n+b6
+yj6SxShEc4Kpj67ubde+M17n7KJRSPzUAXEf84GY9FRvF6VLiHCc/OLH8TotiAuRder157jT3HXE
+UPSFGTUh6JP1J/GNR2+mXmCzzuJymvobPQlbnTdVbZUjomlKWX8dpX1AsDkQY716dt534jFVUsot
+Yv5kcLGlGRPeJXgyk1lxKwT/+XYxwtX/AbMwUqRHuMs639HKbmMjI4TCgcK4upeJMNP5RWpPkPqg
+sLhdXmHwyZUpXg17xH1fr/u5m+AF+SHOh+8J0XnU8TbmMHI2wzZDk5mpxlaT1eNEkxgSzNqk04vW
+nVxMM346u1CS0qnso3fknx5HlicgbTbKOGVmMd5u8cbSeCZTUzh32ca9bbakKhiYpvewPmuUCeN8
+b0zWLKO625IbiYm7vtfXJZmAdP/9rXqd8oRbbNa6y2nK6saHa6Pm61ejXazfMKIDR+IR3ND5n37V
+txY3ZF2Z5HJxmKQEN27McfpuC5sIkIx2M9nqQwnYwel9pouqo/XUrOSF3LAxBVcFs9csJz4apkL3
+ZWNw+jON4KMEupFS79q+rIysfoRzvWNq7lRisttLWb33OI2yOcAZnuPifLLVd/7E+xGudJ2oYE7D
+gkIU6QSZ2SLZEHxzqWXO3EbhgV26o4plUqdH1cMGeNPFhCZLJ2qWnTiRy1i2U1bvUNDNp9wr51Mz
+JAX6mhiGpLI+S+2pSKMH5ESbYdmZT4z0NWPYwpJSHR1xBQ9xBe2JN1aYOU3QUiuUnYXhyXmJnKrV
+IVv4BCQFLCFj1m+DUgp1NbsEJJFrMM2oJrIP8I9GE6x9vhGYic1rMahJW9QiLTvYuff7GGmlbnc+
+Q5YLyrkAO+OpdiMzvoA6MgmO0wuKJSr4dFzXZ++pjNazvAkXr9R+jopM5iUNrrlw1pTwlu3Oq2YJ
+QrdWEzHF+OHcNx2T5E9gpqF1msBon7XVv6PgafVUndXONmUq5YRiCuea1o4k537DxO7X6BWQ0y0V
+HVfBebzEQIiUaP0FuFle2D30QvbnJ5BDZgjoAvHs5LCOKLvXMOTl9wIPRF+Cl4xLz8MbuVTLOtRs
+EDS+YSHa/ELaB/OBmIzGPvKgc7+Q6IGMPdlHa0jh+I0mAQ1OvuDRoOniE7/2hvz6QsmvOcncDk/u
+LewHfVLHXd3bgKAfmLLoSp+Qi0ruoplSWA4b3ZNEUaFsmzvlGhSXgtw7ZQkREso3h5AwMyTImcTf
+Wm7UddFQv3PLikJZ0X7+gZP1Abjxq51oER1eqWBSo4L86UHXJp/n0+rj/D7Oz/fiSUtQ2/TxBoLa
+oQrg0fdCA5mmyoG8X5z9DSwTR0NYA35gOvdZSY71fKYq2OTdGmc3fcFMyP8znm7vaTXrgHDMy76R
+YKYlRdhw/dChFyvbWM0Ss8EDL8u0FomoII66exHcTwJoMCZ8EPdfibDCFOazOM1NA63DXE/ptc9p
+rbLzXXrpU8Lx6ngehRiHPkq/v63p12ZCOPGUcDXkgp6AOvKDCdg7x85c2u9sYGmUNbLVN6S55Auh
+W1ZI/GorV6uAIdlsBmyS/HDCxFaix07S2eTZPtsEQ9Lze1qtuMANfVEi6aTKmJ21j4nTJYq6WNxx
+DqXHgk+o6+Eyar7fOyPpGmtw/h0I2/79/eHPB8mrSmL4SYiO7V28bKLiy+DkDCxGFfz145wZYeXe
+GgqKMSuJ35OZ9ySQr66l12jwf4bggXH4Y6nkIsugO18/Sd8c3YHQbq9uNn+Q8PwQPFKgNnEWFmZF
+ppYj1tvYC3sTuIeSQ4KnEnajc9ES3tT6fE5qS6lTHWn7XGLo1tn6lJvf5/ScKi4gdtKAV8ZaoSx5
+SyKg7Er+kCZe/YxCovpzwSuvI7HEE+sL6B3IzlPrM1XMQN4VFw1PQ2U8UGNnWNWnWa9/x3+j6MJk
+ap2G77xDoqo3YZdnfecymmFMDP1qA6iaFR9x9WJq9iYpK3lRXRfqY+o/+w6xh2F1FVtYhGIQfuxW
+9JGpjA2Lo0M8pjTHX53k+ES8dfSs7bzjlIXn9codiGWlc9cksHg/0X8m8jLqiV6lS97GhE6PKkb3
+nlG7mclZzJL8GYW48ZSEFWJJwpZmgWosS8cPeIKv6lt8LEnoFxD/jVW94lFIzsSHp2zaxWhvJiq+
+3s692PD72SnJnKmeUmvqhzJ58x0P6NxAz01de7z60AfqLE8NaqOa2UP5s/cv4nw11Un28JCPTT77
+cmyfl3p54/bkXWdu3I60vjydbNL+WhaxtSauY/szQD0i/uqFh8R+zXVzb+Wm9vG3cJ1HvcosT3dv
+B67CLiSAO2q8RaXc10pFFPxMj0LpNvNnqZSeoIQi8KSXTOYlOqdvAs6dqeTHxAvMjvDyX7QFnFwF
+bqzffra9gD5vZr28A/HTzPedqftQSXfvxX9sITkjbW88sjmdyHKm2ThnmC7a2KCh3si/outt5LoI
+eGoTf5ki4/FzdpzuXSDPOA3/RWN77Y2Jqw7SWcOtCksy04uqO5Azy96LJoWLkj0ziunjML/K87Br
+KpWioxcthjd7dyDtm42sMD9F9qKO2683q7fbvxlDRj8zdrqRvbhlupJ5kWCgwaRbmLlAkPfSK1P8
+nRyDIg1Bz+vsHoZPmkeRN+Wyz3BEI9ZusmdvCtuTnz9ivETX1/nrb2VQkOhAfy9vgNKJgvq3rAbq
+n79AQfl7K/4lq4H6l6wG6l+yGmgH6NXjo/hILP/9L1BQi2kN8/UZ9UPs0ip1q7aR3Tvao/V/kAYT
+w2wjIk+U+oXD+//IauyhJhJ8zKBYNGkOssqZlG/1v4B6o6dkB9tECVQ94xwifSnh8cjXYxJnTVTa
+DC1hihyfj1Kvz/RmyOBC9/r4k8ZUaIO1v6B3NE2q66fiyO8JyHvGyiUMVyurW37llecl9+kQ2CHm
+/PTRUWy7S5V+0xa3a4p0U/ZhxJjErfKyfbW3P7tO9mPso/m5lHQNgzdm1k97+gkOJJ39wzgPCKur
+F/ntzu32Z9/tnx8YGv+06DyFDlDroHrgRt7LZPekaaIKpWGswaq14rtfsnC7R7yTf5rQeiChKL/l
+l867Reh6ET6UUqg0hBl7yKaleJZ4UBTesE/TrODURRfrsdfdaaaUuXs7RfTZ2RkvLy/HjafkB7//
+tYf4zLx/5rKOoVQylI2tVY7d+fZb9+t3A+EqK2KYbbzdnqmfLC43FTkqv0OWudhhk9WeTsPdm6mM
+PA46TIq3/vOdvsFGT5Qd8nCokXYuHO+x751aU9k501VGH3y8Il1e+zpww3EpP6vUv1eIeeAUU/j5
+DtRD9ow3OQJF56jpZmBFObrUgDaP4OVHpCX/O1Btyjt66mfn/gYF4TH+NMqbW34ZVSa51xvs7apS
+JRM4H30HqyRSn877FRwDk+n7V+uNAbj2nUtsSVvkUkNL7X2TfQPiC4ky9scKVWfubApNN9M1cjLl
+ZEI1Td8towIu3QZe3zC4JHN/hYbFHSo+VSVyU3011xM3lH1r3tSYQORUoq8V5sY9qvimbykvL298
+d0FeT+MMRbmdnDkZn4kj0WPJikSg7oMi13BUFUKA9NFVcICZba227iThExw2i3lZJtNKj5VK0+Xr
+pxUV7T4HNL11MjeYOKKEFFrHcM69wxeuLCSCeJP00DYNq0eYrerHFQs698FFUigNhSJ3oG8tLaMf
+6grDGlapmmbWw7rPxw3tHh+Wcj2fPmCY16MKOq3B0QuQgyIOl9cHzu5A7H71jgSZXwz9ZergtKNX
+Ga8Y08X2RLkr7kC97Xzc/TcSi2UDG1+vh5fObHQWy1J0wxGLd6DIOHIxsfKY4wRS4hMe+r0i7S+B
+3/bnVdehfqjCapUOsDoVuuZFej6qZWoFX7yfUy2EZ90ytS45hV/vW/uOvveLeHHsJ+IdLcZD92VL
+Yj2T6sOoQ1MM2lOjh7Zujsoeg1syOQXvjHq+zLMke1+lCpZ0XZ08XsNydl7UrVF1dr6fNDIole61
+Gatl9EN0VnVbX8RbkvShaFPpOBjjnNpwMYoz1kmpnmYpk2MsOJ/NyIP+xLsl1ZDp8A60brYgil5n
+3HjLJWrtNM73qTJXaVphgLV9Yvt7defk+U14bYlZ1EBoAvHXc6i78gAP7Yln0nvgMYBxNC6VqaI2
+6xqeZe7ktOsSTeN1BwLXWok+fVuXf3ualClwgqoWsaPHTHr+4Ta6zmPGqYOxbajE1qgmulhtU+sB
+AX7HLc/y8Nbek7JGmq9RNScdZpwRmk0vFxdwCi3tT5229dICjeLZ9CQak+e5eZY/mhEtq1vzLtAT
+qbfL0C7FpRXMZ60dlyRC3p+r6iwM2RsMIngYlIUmahyIBfDxGZfYXq87dxhpuc0JfujjzlsA+kam
++dTdbIGNQ0THLOR86avDi0jlq4QMrgdpnjzL79ZsS87s+bNY4ll2Rfyf9BeqWl5EVC4ZLhQK5Uo+
+cimh0zNp4/d577bnTLOOsXq9tvWaCVspODrp6tI7nXbNL9SkIDTXPuUOVL5ylhxqNr8luvgDM/D8
+XqVeGdLa8X4uYeoKZJjYpvOJMZaZSni5Ae675qxFY6eI+ZjzydbWuUFN5r1qpmv/fo/2tf6xPVns
+xY5vm3VOJrVI70S8L5XvQMcYP5dv94/f6owlUfen0QnWFEIuNDXhSMo8Pij3LaTigit00ProI5K0
+yQOzmNKIc1zBorXUEbgPs74xpbisCvsrS58Oa/XGqU/QH5IooyoavgkKK6J32mpMibCtMJ2fSWBm
+y0ucHnV1H31lWjzlkmZMjOPLvYSNwIpSyXDmBE9CBD0SveQKHMjmVCzrryY3B+vfZ1q8afZNIYpe
+8Ih2mG/FvJn42SXtzujeWCXfvLBo1If6oiBW3a9pi85ai506imBro/61unOQ3Ood6EG2G3GuiSoa
+qJ/X5px/9VnmE9JkE0XuTXNSh3HaoScbA9lf9fScr1J7lDTlYr4sHlUziiB0fb8dN0SEng+ML8Q/
+pk34pBO9dQeCxS9gs+xybagzf7lhXjs/V4R6NnUaLBKQ6/C3xEblXevjIukNCS7i1h5/ae8h4lHI
+sAabq2pg8ntgxy9N3vJLHJqT5/KfEDCmcg+wxVnnGDnw65BuxcVtBSEID83CDYM2zdxHo7VP/c+H
+PCZzvQ6i5XboGQv3YnMfq61oRLuUCm1iJTMVKvOXYfecDIeX2SljXeofkNbEyj4hZnbvozzcmzf0
+JnCoZTVPFuukk/JE1WNMMCyFpDiYYn8zabRXUsh/+FJiRkT7LUxzlQLH1KTW9t3XN8wrrA8aVYeT
+orByZJEptHXzAtMzy1I5P4bjLRotStviLvs5+pHoc5ZvTdgcbtW1vNrbWzpwnV80Vmlf5/30eK9n
+s8FIeeZ2x1DarNHSnvskmL8/mNXVVkmhtUM5aUR+ql54YxuLE78hZAA68TC93SOw8ypqc7/5tHdi
+GrZ1MFHFWs7ha5elLBqMY4z9Dlc/VhQZh7vdxSCjaSKYnLiobpAWg/ujeSmWSaS5/qzPHYgonNoc
+O7K75jv+LK22bsi01JMLJWYZC2GtmZSpYPsbwt6wVx9YjYySH9TrMYYFv4iz02FKlO1r3XTyOrDd
+7DlNyKAT8WSCE8rlk8VFMqe8FkhXtnPkLJRdamAmnyZ+XZVEXGAgboyz0YttdWDh/DlHsbnVuElp
+o6lh4WzodcZSY128s45DWgUjrzRbmB6DrTu92Ss4vaRN7CnbwEQJd8JamlIfBX2IjNlqef21tvdu
+c9jp0NWQCgf7REn8OP5sfWxbQ+HcN76lzmwd5nLxT0uK8881kTmfa7YWOesu+PKXzx3LuGX15EKX
+f1ZsbY2aL/McMIszLSLnZHh4VncxDQRwBLosRhZeTsiyI3xR271TmLTYPSnCNWPAHKo4Ca+jJ3bd
+E1kzNeIazHXWiBxKlKbjZLy5rfaDkqq+Za354y82PvBnawhcKjd/GfEkmWvpplSKej6z7ELKePOx
+pMd3stEW5TiHyGJuqEV6AtmUrlJSgZ5tU85CvbJXPZZ1C4VS7jnDpzohGSZ9g/LSfDlk7goN4rME
+VrYNmziNwR4COu5Dl0Rma4qZAhzp6E3E9xJvZ5dGX0MV0zirg2evBjGKVGayHsWGE0ilu9Oy3zin
+s85oOTyIrd2YFPx0tsHMNO+qmgQecKZl+aIxOhmkGjhC6Cn7RL3ekIOci7G5Frh7JmLwULihZVfP
+CGJ//L4jrphBSldr0qm0dFXR/6SmscIlhDobf1AjqGJH8b297m7mrGctxIglcrlNMvuk7k0J4djg
+E5eVlZhFgYNUOemn+W/1kic4w6bZSBbWhgmJHm4R0dubScWncTNFSea2dBcrHJ7UtLS0NiZq0mm2
+e2kp/dCyn4X6f3N2YFR9Zx1N7bHMUlcwzm+jo5/wuPCDXt0CxUyq3qCnJ3ZD4uIis5s8M6QLHGJb
+ouOUn88qz4uRyKy/kSg7vbBOfzPjkq0AVWcTrXd7l/R1nELnJZLLggxt9TKjZkOiHqKLYpfepJnt
+c/6si5yeohh/WfQxIVkY/cNmd0wpmeN+rGTMxqzTx4fQ7AKnYtYiwvfvvhTVGUjT54Id4hPHmVIf
+h6SSxjKGJ4ZEFyHQ3tfLxOWksuf7os7/H8LeO57N9+3jjipaWkGt+iJVe5fSmok9YsQesUqp1VpF
+UaSoqhlbaYkdo6hVo0gVtao1q/ZWYidGSMTjfp7nvu/vH/fr9Tuv/HmdryvHeZzH5/yc71xXLp7p
+wlGz6lFZWZtrpqEqjfer1DwzqhSV27Ie3nMVe/kb+/YpDhurzl9x9WbrwbVsxdJLW6H77Ie9lXC2
+GPMJP//9vtbWL8bqOx9LS3GcoQ8/gY469tAhc2JOSccGzqQSCDQjnYXN4vUhl21d9qM/McOVMx/M
+f0D1+1P8tN6NPjigkiCcFjZLZIjSL2rbIb1zzOem4ND3uY23dZONFbZt/CTNOe/NaN6NftFszDmj
+PFq1/CUtp4S31F5SW7ausOmrTIK5exgzg9f9inWNFDP4LHRl2it9pEHryvFM5AVAe9bo+R1VO3vN
+vmS05eOsdt07A4slpnBbHEqJn++YqsCAtn/HppvpadMLVb/Ht9lGe5jCtjRhFCYuGkJ5aoUG7IE+
+q19a9uNGWcRnv77L8eD7bJSk8Yu5nPL2ZFzctn694KFjEsJnFU4lYA7vvdN7PWw2xh0ktGZu+3GZ
+0/fbszSvN0YN7x8riLnECjPyWCdS/bVrTByo8jINflow9Nl0/tbTqpbKqSmW243NjDNWtoYuGQXf
+bYcjM6nLRcP3P/G0yCbftbx748mfBHG+7y818M5zxuT6p9K99DPPg/f+Sdzu3vW8tIX8DqqqLFay
+OrIyPtB7f3rE+UNNBzRd0mSgPlrD8k0vqxcKH69JaP4ozOcAmhmp3+WdrfSK7554mb9uB2OOvV5Q
+j8kbsrBBtqs321e9EK4cSrOfcWbFFz5dzpzWlSdWbPkAPWW0WoqNoFWuUs1eBSkicM4snzuzXrHA
+BXSey3FlJWt9til/VVHmDi30CwuT3EkVu4yJJtTMLgfXMMtQ8Fzt1O+Lhh5j3k9dzVm6V8VKn0LK
+1ZzivSahZ8o5YoXGtEn/RA3lbJTf4bjKL+kayS+/lHc5UTkXS55+9bHl0VdhGndj+mxuFYFVS+SV
+NavwmLjRwG829BQj6p7i6r0sPk/3RTqVI7W4QWZcjsXqoc4tquHr3ZVt0st9dLoJBZdub0jTRrd1
+ObHQqdoCoetXs2JCxuOSrquiJU1pozpVPk5+/vmxqiRz7sa8zf1gyhzCoxbm+lRcc1tt3vQG+r1I
+oyVl0b05dXfqiS/yH72usL7uLi1x2+b77v28id4lgMqy0X5BPvqFpY8OTwrnzk85nMjoPFNtsg7P
+qj1cmEYIm9ohtSkpKfooslGM+WPjQKPWuJVrfOq8zhiHR8V4lhdDk7dHhLdf3DUv5yZok02d82yN
+Lxvb/aIs/wlfeVfpMbXt4bi7cVm1Mo+rF1ybV2e9esBrdo5A5nqjYyqVYsO3r4N+RnszDvlNMG+4
+m1okZfhNOaVRUAGKRRSkioZUJwWTHYuvODZBj7OfukqZJQ3C09NZ6VqUvr06eqksTxb8rG+gshJv
++GqsQbSny6vI42257zMzCTJObyu+xP/WhP+1+y8R8yZcdUaPW7MZMoQ+RjM3jpS+1Zp7Wp7YR5fG
+aK6l4Z3sJqx+t+ttfvRwmUiB4fKU8MzOg/a6l8++2jwjOm6/3NlMPZuwaKJrNUyqMvytfqPpVi+F
+4W50L8LHNfeayYRIsJBCYCkayVw5pWZRQudlsl1WftVYi7+RSSSohX8kb4CFXji5zEv+kZO12/Hn
+nhUXrlYvPkWDFqadQJ+Wj2M/4nrY57Q0AieNv7K0aus6aaAl3iENXtpyvxZJ1i5ch6/Gco97VfHP
+/Jb3Mv/3EyhXKXgA/4kbUH67SvW/VIPqf55AufqfOv6LalD9i2pQ/YtqUEetA/G/GCgk/m+qQVV2
+x+NKRvCmXfX9tLDHY6UnU0kv19oVkxQ2BzdXWsYSr/3zP1TD94p409tiX11vHxbOzSfrQUnYjF9B
+93VLqCiFdG8ZJ919t5Ml9W2ldSZ90tX1/nvXenFF/44t/vYNNp+fUbNsFtwZ+0NfowIX7tbb4Rod
+az6t1O0+UAxrA/NlupfUfUr6G2D3ocBrY1TUCqcYI5n2kL2Z987YQ4k/533GP+wSpKpokYSp4eGU
+Js0fzR+e/JRjKY2wymYb5ULFmCpxvssIVNKUmOFewiws2KyV6NU4ljckP0tafsyjwMZkpBEv6hDW
+qGPXuIjlmWz64DskWv+Hz6ax7nPDPz01bRrM1u/fvf6RHGth0dFtnkunwMyqkJNo/Xi6Xkaz8KBV
+94VUodX6dIGPXdy8nZCk2oZ6QmfGtFiJloZMFd5t1d6AVwNZzGLCu5hEo8Wh3CCS7J2Z0vlQ2T3Q
+pvGHe/2CUdPVz5YS66O/UjIYYhnQ/Cr2pbl3C76FtGNZCq3Fv91pbRv4dsOup9E8HDq9LLyn3bl9
+fOlDRPZzu2balFumvNiIfIXrz1rPePk81JfLPsblbKcNGtQWPo8t93A0gCtGrzcXSaxZ6wlefSn4
+9Iv5qzLtsMJ5xdzvEOCOcand559CTXRCfPtn47qBxrgqCcRLo9tQoUJs1/76jLRliHhVtdGDrb/t
+z5Tiw/yUt5euJXBEE0UWU1ete3efcTjM+xfAO25CA8KZ1Vpkghq1uLec3q2vlkgsXtU9YZa+CU9j
+13vnt7Lnuc23okA3UU7160ruXPE39Ru/qnolnLhDOGcS50hsgXXTVa9Tvw4TfXOZ2JfuJrlL8B6O
+fPzn5WC07axigFNLqmPeP1MU5hPfqia+PuF8Ur3MCeXouVnol+Nw80/Zn22GGZjf+3+Ctk2v9gk/
+n7b5KfFA1Jqzg42qqMtSfmeGKrf+5B+15Uihr/RJJ74N9Q/teVL6W3dFUsU3hDshb9CM24oa5a37
+Hzz9gmx8EgfrvN56NjQnvvAzsuk1FV7rsm8kXj/mh48fAJiv0HLzlOqbLxEndhW9VjyV+uYfXnsG
++jKzIO8EZxMPG8daVtq11XYHNX+DXgCg09+zTxoYz+yu3d/FizE58zHoRSqtZdpWrSwJ7oYemMQV
+Wc6aQO3niwuXt6rlU8vmVVOX1EreezdpVhXVeFbq2UTvBR5n/i4GbJ243OQSxuepi3Uupa0eaVSx
+2pUP3KF5XCkU+pmBg5aVDSDR6w1Ned4000+/2QDz6IC/FtdVGq9I4GJFV9hrThNf1Pkdd3QlLScl
+UcvX69tKuem84DIeSOEouWaipMz6spxeZ7V0kc9meuWNqOzj6aSkpNz1lqaIWe8vTy2G7eG7Hh2s
+xj3+VxmHEmA5MfWdj8rFT8QbC9wcvNltmUux0Pf306VtdkpHamLWpMpnekfLHNTlfn/nsYvxN3jz
+V5kklLQX2NScCCrkMXu/HTD7UM6L9e3PB0aqz7yDn4tP9D76XcIkJ4Ot2DTtU3qsLSxo45Oa9cFF
+KNGF2GKax1zKHycKtbayG+D5UySw4NkQaVPprOsbrKTCPJE8ypW59Pzp9ougv0+W8On2mnsP1ZYX
+zwda68tSzE17NXhL090ndBWEd+Kr6sywWMWR+TvFMz9iIxKcWcTZKmcDf7ezV3xwrfjHnTX6Sqyw
+Vr3q0wSeuQySZfuS7ox4VUtE0F+Od26uMxnUwa6qq3TZVQZvLgDSSzFBEYb/VAar3QOFWC/H9jfb
+xCDAa2JG4tTet2zUXaknmnUTf5sMBfi0pT2TGRGZvip3zyT4fTXzHG+U1/wk+4HziEHwzWLPfyKd
+oKbd+2K0DRtHARLGmnxMiBcUsuYoQh/LZw0PUyELtdE1InROon0d0Rx0/ElNv8lCT8+g9V3CHcOQ
+KgOuxraga1/d6RPiEtFemuc9n8duFqWYm195rJOiHdDxkOmxStEJU4V8jbNdoVc1A3PjQIlloxUk
+wFbbsRMdF8R0HOCv0PbOKqCSYsVYmu3G59HYwuUD8WtqyFixFSZdF1+Ox2nXw3bvSEhopFznYPnQ
+xviFO+ngOz/Hcg7e72rkKPpURcfcpGw9jV8DblHcXN3H+r6AWnzZZ3neUUJ0y53dSKJaTy+YD2oU
+ITwW/EaeJZz17XC1/t0Al2fN323vVxT4xU7cfZbzUKzIrheO1pMJTHt8Y0AUHlbi47vvOFKm6EwT
+LKGpJI4I43RN4btGPz0vozxg8hqqQ7NLo62tlRAVh9NovHRgVQMeK79FXSy0ghK57Oee7/7zG9si
+EEcXUIDWDg5zpzMXKTqp4NxAvtMB3hnI3rO0X8g+7a3gsNpplEiL0SquwxfdSh4J5Bpf1lD1abbG
+fDGtx0jdLpWuPtNPTYXSeJhnNZX0hUgwuXKllTcZn2TTq7Jr1ZWxet1du/c5ykK5kphkS+vmONGX
+8I3VaCRYVGstvml2xWe3iaK3rbwo4LPpyftgzSOM1G7mqN9fS21naINcExXOstXQoZ0lHaDTaJKK
+vi0ntVNezi+S+GpAhrGxNMlXKXAT6655lbFS+sOzxS+FTkZwb/HTZAOOFWKU6eBketMAxWHz7Afj
+ct/pWVtF8TjbhzZCnwnktWn5wt8uL+u8GAoQIe+GC8ftrIkD9sXH+s5xgTWzHOkHSo4SVTek7apS
+GUaMZF3EdTx1d21j75SZn6ryqLcdm/Iw2sMHn6IrTWuPBY4DzShZdlqr0D7EQCla0UjDei0JWsn2
+ac0h5ix17Vyu8eyBLt3t1HUDOtoyOBSuq6e5NsD58nVz0fOEZu9CMdsqNdiLlMFnn69aW8ElWKys
+157q635OmnZlnS+5//mcEweFxmt/vgB0hAezDcHZ4u5WXeN/LT7DT20wgF0+s+bVuq6jt2XabO4U
+YTiv+RWDSHSf49dPsdX6UCvJmqbJb2Z5ry1+YiZa2OfpCV4RAeQRWPBqiI6YeRE1/3nLBX8Hl4pl
+lqnYXftToci+1N5mO/laW6z7VoY1Ed3WMflBR8dPrDGQa1D7auN+wtG9Lok/vylOWDOL7//ztS7y
+d+SYF59ZYIbFktfbZ72f0rVlS9c+6z09kAR8R/LHfJ6/O/dQKfZZ7U0OAcYEebmBrLvj/C8Lkjwt
+b0zckoiF+HDPqqUVaRvdjflZzGBSaOQWV/I+e/QG1iYJOj1jQMv2DCFiq9j+NbDpw/helv1WY+p4
+LwvRYYGm8bRohV1CqgIa1T/XI+KSKJ0y6DMTXe1GTtJhyIyLUOPszS83LFksPChI+ol1Ze1pFu9c
+6qUfauoe4eCl2Q1mkm7WP54snV5Wd2xSy+hLLxFdvZ4WrABf4y/LSJ3o3X3D1XWDzaFZUfKVu+ur
+R+P0xOCqsgCxQLHWKwN0wpJRtDeZrpzE+aVsdxLjlH895d2NLhfiuQCEJAZ/G6N30oimm6CaO66Z
+u3GPIFr33X0HxH4dSMFr5hE0Mn0X8oKKqUKVTy8J61+4EDbr+Hxh3l5QYoQo6nGMpPg+lS8jjBbh
+d/tILfr2o5EEm5hSdkE0/TV/adG5wrf/2MgMpOZWk1eLul6I+BB8rzPusF7Tf7CvfoKcmwvce5n5
+c/6c3tNf572679X3uODv6/1SGky3OGOM/+RhnMM2wzIV3U/MuvRwCnKNOgE79Tlfwbti9N+RDPAC
+Z3GJIxedJg1DphtP2wc9b7UwNU9OV3GgOe52H4p9CXbz5j/jK73jzN5/EsWVsS3a4FEAvzJ3zc13
+v9IsTpw/WvhHXn6xgSOtx+t8/Jdut3K5yvhXFi+1Xq0Y+SbjTNZOWk7HynrkU1+2uKa4P3jo+1xi
+8+zhk8Y4USaYR2tr63GNxzn+zyu15OKW369FPHtmJcw/BKX2Pza3wITuj0wc7KbXZfqFTP6QtpYA
+tdSLGvX62dnZwUI6JGzcVn2nC8T5KJJdHDPYgl1Yxl98roeLv9KLddewWt1+WpFtVMxtd2UdeV9M
+9YdaCbEzqbNGjd1NUvMmnMaAS5GvsaZKx/BBhpDlLIyl7nuiQHyD+02Fe3RmkffvDpftZJj1MsbO
+8juW2zbevl87vaafKToy8f+3yT3LXUJKmAI/33qpnjEibXAvT3lY9A+o+G7Nj0DJAW7ejvEK2wvA
+w1W9SqrJTzpcJVAXA/L8s3SmIdpnR4GjIz/z63lb5H/N5wruJuHplyInfqf80yNWkC7rtqILX460
+Hvogrf3ZcpcbrG/aiGJuuHfC8dxAuiEr3blVy3R2UBQ80SSl7t6aKczqIYLtD7hs2y8td1EtHZqW
+EpWUatbKGd1mrS9qZh384zAi2LCGFppZXNTM3pM2L0r0LH3EWONL7OaUMJ9IebBStqk1LdtnVRq+
+kt48+LhixkjqgBt7gDLSpzec91ejreAniUZEii48sbHkR5LbzQzM5nXBGfJyM9L5oDQuMPTZjnNG
+9ahVbi9do4CMkNGxjlvsnzv8xt3R7wMsZH0l2itPRLZOkqob+bN8kpt3n53ya73K2wzl6NhjhRMC
+YXWkyR5Otq5fn9yHTxJGdRoTwrAnNR6tx0nmVD8q+uzsrJVrxJmoMq5IEP4evhA5k/GokaVlzQkU
+e8EV477BltbXmsn66bQO/BT5nne9X2c9U/bmpC9wvlqEzyp0CvRAeygFz4f4f2/moPyvmzqnuarH
+NRTV45ld1jLZIxMYwt6F6x/gvzmpx+znAwbUdJFVgTPtr3fE8+szl9PKR+fWMC3zARwRIVebf91A
+KsC+yZR+QY5y+JM/ko4TIQ3fHd5L2b7Z0/PWyXnhk1Ag3U6BUcgbZKva9HpKnlao1RcQEHMXEHxz
+KB72kivKEDWBSpe1w0yPLnx7qZUxwJmr8bpM+pf0s4TmZ+2eKkwc7n3Sln8Gr3/QugCItTr09yIf
+FbvJCp1RVDMmw+V/B/M/y+tEw7D9H461ZuVL7ms3kMVdljhm3BRMla5ojby9qvrH4WnHTL1ajGeD
+onqDol31eElS9GV37BOV/Lc/zpQfLBHePzsWKyrlbr/8LpO6KjrGjylANwOf1T9zTwP/5JS/0atg
+yq43/oWjOHhY4wWTDNh4iQnt94QmU5ppfxRWCIVabZh8e4TumBvKPPpMpM6FVECfAJqOGMII4EPI
+w1nkemwvXqsUUFCaPLNGHvNaTuu69dgAHbDrDmYaUBtZUcmmWDuJWSWu4DRO9NTW8r4uDrWnCesF
+yFNI5XddLRJtOIaqgR2CwMTbf85ug6dM7njMGs0TqY6dlyOQ6d0tCNqXIYie/Oscu3+z5ihwaaqZ
+z72Eb9+UJjz+h9sU1PpOrThrBDeITlpV0fB5fd+FW/wWQ3QW58dpaxlgplRC6E1x5Zh7Y5BE381n
+n6NK2rWX6SW/Hm7efdH4RlhjWi85O6SF/3hMYtYYnIBcXbpPHrvbzSCvq72s+0Vi/mqyXeG2QMNg
+nxbET/bp4zqzyNXMP6krAy5dLfK0znbjkX3vn4RPMi7R7D7LzOZTfZG+IjP45vCzMfH98/CbjtMy
+td4mS0py83nSNJmmCaFFAqstfho+eS8BTWxFrZD3Iauxs3Tnpa6D58lJ1r3uGkHt0uFs/6YaVBQ8
+gP/EDSi/UVH/L9Wg/h+qQfWfOv6LalD/i2pQ/4tq0BjMuyX9YqCQ/b+pBnXZndD4hNHXQTp7Rzeb
+be86PzYwm/vw6+NkGPxsYfdbZDBA47+pRljtoqmdg3FFTplwjS/MwKovTOTI8oQjHeF2AaBxQnZG
+HRR4MJcA2Szq1w+d2kTGi7eModxAFoFXreOsqbfsC2DxJqqnzeslbh59yhT2uvgA/FGoJz4g/4bI
+tfE6GSOg/LMJZC481VIlJCbbzfZOcnq4Quu99H2uhm2XGkrDaVm/MVM7vTNVNe6KTU7uRFmBMhW1
+cNVI0MMK9GB/QoaKUk4cXzWvaezI69JX6F/R8PV8nSJCYA/cf6MPTXMQ8KuiTejMogIVzSVa6L8T
+GyDOo81RdmXlNWckA1WYdTcRDLMV3uA4TbfMS1KRqek54qBh1SvjAMYbw6aP/P1NoGZK00nC5jRF
+uJuqIhvVSRHLGfyCy5d2AdmZorKprTbuy2tpaXRTEc8QcEOQsXLEDMLG7wFMKzRTHycqv3KHpwJe
+WzQeHjmXHZV5eGK/mX4NXLd1vul/t0Y0YHLiQKxMrHJ9PJSUS5vLt3xmHodw268p0pjUSqWM4E7o
+YqQ2d8ftpK0/NleYnQSJc0lDIhDtOiH6MTtaVgyaV98ance1svOqEgRyO4DMdUiVRVA9ByICPY4I
+mEV4nmU/wF9hhgQYmIx3kvUIwhvqZcSXrg71DZ7KKkVHjWHtdZzqQxA358UKy5FcuUwT1f4QYITr
+p8FEUJYjv4j2ltOO39slJWWzAzhzhS+2aGBKm7miuXwsgkyDHW+JXeOFAqVPfbQYUDGPmjeuNEdg
++0w8cnwIdVAjsOrhp5S13r8z8Vt2rJ8UwhTfjjPf9Ewrg9aCaHrOOZlAdciaq2GNYXrwCQJYquM6
+CZ5i9rpSU+DVGj4EgEpbJisnMyWQmYPqjGScmaOpfcsOl05Ehmz9DO4w8cvVrN82UjKwU6k00hbr
+U0rPeSNStgXf4I63It7+rtIIEgPut54dV0oLbZwKKaDc16pBXfvuWxVIxs0paaFhvolDDpSAlhlv
+vFxejQCfJa0H5VacnVK6NE/6zNdPRZ1QnD+fUUcD5UFoy00hNwfWj815VAeLB1Uvzcj49V/W4YOc
+3efMubJ8Acqy5SFxgZZdFtmmXXigEZbuNyoqK5OJWXbpT2gOdcqqnKhZeYVeCXOBshltDVCrRaoU
+KpCXc8LLEYlHDr7dBunhwmCWNcC7EyYfY+rI7NgjjsziM8CkDLYHQJbQS/AZ7D/AFYmFvqtJDiht
+owMEf8qhl61B+TJlasrJ06Orc/FgreqYh9vpMX2ZcGeRSGX1dWDjX+hYFBbeMcQdW35UuTUOvDuw
+cRoieFMoj/lRVcjIByc+S5NMn3KRZTt4C5TZkqHUspBZxshpeqGybwvalgmwRKNn4cWJIimC+BDp
+iJut7t9SGv7GRUDqO8dGG/cOWx7vH/kH6Luh+w8KCB08WAc1uIm/LrGAh3sSlXZ2I4Nf+J3YOeNA
+epV9biPk5ZTkUInDY5NxGeMQKhDOYTeLTNdvXGapwrLe42sAvimfvrU5o45HMJ0CgKgQoPSarf/f
+K4AagLItLqPs7UZyjclYwchipKb9m4TN8uB+d+bY6tdvPhZZKKmza2ZV3aqc4Jf9/Tpw3QWO6mr0
+nGUgVW79UbBIdJAgmR+zqZWEKy8aW8Ti3RsS5UUKoHb+XK0RXBVyHU8g0AwfHE8KaDfzygq5/1GG
+qfaokOIwUSLGqJ+F54i2uAGaYS5h6BSpa4hi2qcpVmibInKAZmbsuGPFzIxJitxP3zEWmp57V7v1
+Tm8IwJYP0xW0ihzSsNYStZM8ZUVqVLNK32RvB9e9zcgqx/nvXS3QEUZN6chK8rpKnprB9rNeZz14
+0E2e66vBeiRKXweAhxfDHZ4zNfKLDo9M0pLI2MNq1SiUZZqqydeJCv23/kDeQnshZvr2M2cj2DGK
+Y9KwXzCKCchb2ZOex/tJVRkSJKyK/eXCH/7BwRDD6m7hUwKWNHQgSLQNy+J0nPXkrxuPaE9Ex6az
+QG/8LDiCCRJg/OvhASGsT9IPeQcnyh8ZWapunghY4lIPcKaUEfAuo1Nl+xzlACAlj1ZxBpUHOU6T
+BSE8flU4v5Qhg/9IA51kdsTVKc03ObJxYDTc2m4lKRY+rWxB03dcYARi+q08R9vMFTpeiZvARwO7
+fjVf3VCkgQeODMFxE2zmVL6yzJD4xmsKfj/zchpCvxjFnaDTKlJVTlnz/Smh9Z5C1B1D9q4llFOw
+7TdqQYGqYG9Scm9n5SKHh1K2hsxAa+VxuGHFxAaWSln9V1xbB4j1HeNd05FMPRdBWw+a7lYOaV98
+iT2S/xAJe6CSjwfmb61bXT+aqGzzSKSlYlwvGcPLlT54m8V35UYu4tuuYNw8s98KnzFAwZdYIgQP
+jlTIqaVmL8vx8PffzQYnoVVRNOkPbTe3yvUfZ46lC9HhD11sb61yC+kaE2S6NmSJJZlMKalIgCz/
+SkAqS29mxMT511O3aKnvVDBjY7FXKM94k2ahLDVVMCpMXsgiqYzSTiaYYmy28IM1NF1TmbsUtp9r
+dl0X7Mew+ZQRWRIEKYWy1mz/Rha6jdnzlX6M1uc/LIadiIhVzz6qkbsAtJz3wnp3y0EN2tjH69+1
+mvedz7tggj+brE2CymksD98SjS4T5QMr2uyBVfTTbHvmMixlMZPsmNNOArabvage2aOh2E3OYWAM
+jelVUcLIJkfKeYaUvtEVljjkOqIdijH9aSR3ClqXHy1+99pZbLSBOqfZ3Kn5lGlySoHyHGbITCwj
+C7D5wMwqOWk6S4V3VhirnOIO6TX775kFTwjVpKmJR8gsF5WAYYDBvk9ConYciE+KHCSr0orefWKj
+EhVXdyrICZx+lrpPuyky3AHUDHW3rLkawGqSKn1Y+VU05hZ2A8sV9lLAO8iYlqVQZaTH2cM4Bdo4
+jvFJ6hdl8KcSgXUdyXYKRTNWxpmgtpzPpyJZY+sBZrQ3byvEbc3Em6weM4iEgyMyRJGz+ndW7qS2
+couhvryWqtAMAWwFSFAem5aT7HjeCq6sO0hDFnhjcDFFy/b24c+gYP1yW9jsJIb2k1KqYox2L/Kp
+s4FqGJNO8qmpOW2VrEciTQzT+aURAHYelTvlIeV2TbIRGaWwxaNQLuEpyMP1X792RVeHYdNjMiLX
+9DFpP6TKR8ahOtzQiaSnTqiGKMvRsnT4j7c6hgqX9SSRkzS+U0n9gR26kmB6HkOeszJoEKLFw1Wp
+Xmsq/Jqx4G+1Kp+EPSh55d5TIaNNOAjZXIjCjq13G+fKVdn6hrGlCJGM1N8svmyLVWs8M+T91T+r
+6+380Vcukdbcnjks8CEv+ZYgATTKo0Me1Qk3UOpZSa6h+eAHirZvhDzezCe+O0YXaR9zWLet0cM4
+624gW7xvVHyX7WMqsnzbWDOk0fguHxpiO9mxB2orqryXw0WnYLqzY7rTPw6Pwi4SJncFNp34evkQ
+npSzzpnm3ELDrDStxq6rzKefj4xozMb6SR1/23TJOuTPYP0XvSumJ4+JVpa9u7/4ZUQAYT9d29QB
+af0evZZjAS4XAC6K5GwmTByLMam3yXSdw/g0YzwcC+vF0NiawNPTb1kSM8j00SIkdlBRBr/MBJ53
+ZiclqV0LU477dVdGFPpayUIGZ7T40rIvEOad197sPrONEHCxRAGzvKOMdEvkwBWiBKtYk/4YTt0Q
+5QD6pI7q3CJV0iB9rYTVr5+Yb2Mi6BQXgXJcUeGzlAzRVwCl87GsGiqj4zMrI8y7MBE4mFN7K4yJ
+d7/o2ub184pPJehswZ4LQLwmma7Yt4RyUXFFJhdRl2Ky+QwAw2V6nuWFH2MyqCA0eoN0Ninram5W
++plln2VIfd3ABqWqgQNIrRTPb9HNsX0jqnPOT4OEki1Ph+2cTwKRVlvYvClWvUok8YZMftnKgQK1
+Co8ZHhh/HDWhOgG2FRpuob/PW+rwnnvvgeDPmrnd7Wr+/ONKDVS8SK1MGpKP6LBxntDLo3ZtxbyZ
+b3iNfZEM9p5yz4QVQXtMsdUrvBqTFrc4+O74jCZ9KDY9hSI03vGSu5+ei8UcKbzuoPmku/R3hpSp
+6G/WDvXczMQ8LzMuedLKCjgKSdpMlCmXGW9eMmzAH0GeyxfSmgvDMYVxqWIjAmhYfLkscSI4ZH+7
+HLiu3IxxozHst+zEgv/SdKYkMsvFFBQx5ykAe95NHGYccSglpza3h7EbQsBTwYFAhl5zh7p+sIF9
+xSYxWY/ohl66ANBjt7ZOAIRW40lAhuDYD7nQg4ov/rrqB4+gmrUcVOnrqJ7XsctZv/j2Q0Dr4Bwz
+wXV4ya0n2tcIwjDCXI4RwkPag6P4je4hKQ55NuuZpt8L1JJW0lRf/jbxpZcW40LXqa+W+O02M+RP
+hmSJCumLppucaJWDK/2E3YDjpH5Uv036QBrH603osfaxnaLOdfJxRejC+wn/CwD/hucx2BVNndpo
+cluBjxDC9kMt5S4itfRxzqgsekQvWGBA7E1+dQCHwpK7g2mlNEtPhhx/uSveh4Kc2mboPsh+1/Kk
+h4RI5UBnB4tfSsuE9sEuxZKFP4dUakdvEYCot9xH6KhLdz+Nv367hshz+D099Cz+TLPcwXOD/ZjD
+OMmWDzhtkXB7vMV8I5dSo7bnd1RJkDITrwhsymhAOHsH/ejd6gYoHcvNhL4OMQ6hn3Uw8NU2CEO8
+IwC/dDUTwnbFYCDGnv3Ctq4hvgKHBu66C0BbjLkZaSpzNnTk/OeSZD8Y3ubC2UgumlkmyN3C6hIz
+i36Ie1VQJ/DRGgUrvADmow/cxxbtfXSMBcseDfAK/uo/KtE99y41AzVupCOz5q9YXxH9Vv1WJMX+
+7Y13Ravov5VmD8cq+PpPSbmtjhYuUFlLFDWvVNFV8hP90PrkJcuEEVWFlC4+H9/KuW3+pHTd7jiF
+f17CMmmINRs7z/y41VjlmWZKBI9NE0GKh0nM+LSSogXXN1aT21UKoENbkVh3K0ZfmvG2YhailRpT
+rvJtV0zNc5UNOcyr7UTl0I4bqLULgGkfLyxtzCE5+J9gX/Igw1me7NJBGIdaxGvdQx3yI/NzjjpY
+zxGYo4G0cGoW0FvmFu4d46cMX4GP30BkgL1qbWjJk0zd7g4/2ilkJlbztacPDEwiDpaPiN1PVBcd
+DLqjSGhr1pLQwWPMOC2sd2+4tQh3+qn2uJ27NXROvox401ZancuszqQm/mcsOJ81Y0wE4lXOewCa
+A05VoKgZkUMbOzE44P55H05Z6DeScJuY4Thiy5Eudws92AQdz/otivNPWkRGhKnSIXSiBY86slMP
+lVtStbO50doa/MKn9BMlN0NzGOGF7RAayxSFWq7Rq+U4wbdO08IKCOHQaKxqqAVSDfWgAVcGu/5N
+hfcg+EljqMWmw+Sf8SRUPN8F4L4JN++rnWPQort+DuUdR2FmWFoZx1kbRWkD/8dhn3xISPuh5G0F
+FPfXRtLUBcBJENIUsdPb9pDDqSOahCQ6FjxYRjWtnk3+ia6UJfc5R/w9+TfVoKbgAfwnbkD5jZrm
+f6kGzf9QDer/1PFfVIPmX1SD5l9U41rdWxWBXwwUSv831aApu+NlCDpvDaprLuM9OsK6+q5xSiTF
+6crLiw0B7j2Lrvi7+99U44gFuIGlzL8AGKUfLxLCfhZPHBbGc96WMq3sJ4YoSFZ3uKyrqZJ13Ky5
+3BVAUUpyyuFLDpWHnvAgdXZiNLdVLS6Ao6yOjaFmC8i3WhzSDrJ8SjHhEFgUvwnv7WtnBh7n8r1F
+pm18yCcDddXI3tV3wjWPnFG1hz+yjM5zhifGVmrCmB2kXEQzREegIU5xZ+BG4H5ucBzv6ZWanPRq
+KTs/rkTwGvM1COMcBLz0hP+uLJEW3YhzPqWfDmEVdfA+yzEXBEIUFEoCaEaqZLEiFbqnsiWPok58
+qwxAaeMnAkjDBXXaiYNSJKG7gAVnWDIjIbaZJ8I3iZxj5LUG1qV0N3dfysR1LJq2tThynKHgBy4U
+kY0OReLNiIXqJpj3ywQUxjKZaCd0YO42rF1pBPBgF4immxDhOkIwFslODJTQrhVJke2FwUVN/bHI
+BoSl12tUJ7m7661Yl7SZMHZygL14f8cCNt/no1xrST/Hh0wbNQn3NqNi/I4sF4Fthd0a2yk6/oYa
+nSnQP7+teqpUDGdSNhviQ29v7kjd7IlLj9ju54Sfd9QU/oR9ZgIRGtxAAzfOhaHlpyYcoy1x3+2Q
+PrjJmOxK5t4V4ymZyXIs3Zk5rjeFd+itBHMach3IDYz/rrDqz+xoVliHBbGahM4Zue+HJDB/B9cU
+7Ids9NyG+L2l0h65W10qxQ7ZjF6T7YRcpVUovDo5Ac/Da/6Fotc52PYD/GHWBEiBlvARaM4ztW0q
+o0jnXkXMiBgzp7U1HgFsd/hizqA3FoNX380nQbIXsSIgaG8/2btJK4FKIduRTscEGkCVzVuxdO5+
+ejyxWXQ8uNaXSrNDbfEpo1f/ixiJA8BtF6kNiv9u/LpoynRd7PUQKwo8bjx5GqeQxPXJ8s/mdJH2
+79wwE6xCGpY7B5yBajyS0d7iIdOMd9TEb3xo9zUt18s0q2QZfXvarki5kMid2zjW8fEg5PgreSAz
+vLcmBRjNZVkmuUie7DkIpSNgCj9nOKwqP9ZAzNicvaypvU7eOv917wIgNjEqw095yrtnVGbxQeTv
+5zdiZ3Wwr2w25VdoCsgrsAf8ywWjjdJCgNKaDwm64McEq48fuDUOQvZ3YO2GXecaqHVSJfY8h40L
+4lkHqf8z9PTSCZH/zplQbFoy/iHpQTu4Uhvc8s+iUtSjEe/RKXvJK3/nP6ZgyUjk9gXAd4wqHGhC
+CW8bFuGm5aHDgbXXBwzdjBTeXEs///i6QrLxhgjWCaP6lQbtnTU/sXFEH/3I3Q4ea0GrWwKozGEZ
+UlPOlRX7FgDWXvxrLAJSKSDRm60ZlY3TDFW0SVVq7zB+SzTGsYlYyGwoKBDLXwdT0Uzy6Zx7b0pM
+ktVUgnA/UDPmDiPqHGBd8lOoAJw1RWNY9vqv+3YiNYKaxaCI5W+0CZ9JQmHFzeLDjcYBCuFPpAR5
+Dh6VhQ1gkGfFSCQ+JFAF/PFJEUxBZtjdQDH7USEfnhvqQqcLjUkhNnjACna0YZbFj/c9QKD1ynXw
+mlb01olyCaipUVrGsZyJQ+9aBXBH8Dj8l8Q4xP9ERaxdr0RRZJnTpHWsXDagOVEQypQC7fCTsygz
+woWuRPq6fx0tKs/3ST5VVmq1ff6IuU7uDFF2FMClWmrPnmstXKbOIoHxw4R2UO5ORg2f40+YUX+D
+TFeP4PFoepId30g4lFP9u3HBsZkc97SSL5U9utYaDGPq7E9nAYRLWt8iNAr5CJ62Ih5iudTH+n+j
+XmzvAouUCW0WAEucQ+P6BG2qWYZZTCo9uLU1JyB4ojhqnAOSIZqUZ8cXAjoSMxKM+4kRhPGchHsv
+kdbbwEvkJ3kYrDpnLK3qMl1fwPcgkHZlvuNw7ZTryoko6adDAIQeElrKPlDLYq6Us24GvS4NYNbu
+2hPcFW2qeVRbnS7Rct9EFi8pbhGuQFM+NlZnIjz6MZc1L/pgYvJcl3mhvgg9B8IWo46ok2J2mcvM
+QJYfAoQOD0MUiqXWpjE0FkB8OrE4cKzRToUvlPL7/G0+40NDVLxBqbqCq6zSOKKZ/fPdWF1S+P5p
+/RREwEEXNu1gknbMmj4Jg8SvpXQrhP2ULE+Bulugus51TILYdlzINbChYhIElb9I/tXmgiz9oO34
+53we9PMgRAjz9L+O1if2Oim6kCsXgF7BC8B18ikifkST6PL/HReAi27AfYb/eu/Vlf9qlP/1ZqYr
+dBSMAB4pVWNH34iUgtrO/RGGpYM791SMHvm8Ss7/9HV48b//lRHCg0olxu+KkTLt9bZFuPWS8WkX
+gDtVGfK8aXMTCYxgkSuMyrOcmBxtzPrpx55fBDzclDfmXAxR+/XvBaDz6w2ksouTX6ver/dD/Ty8
+rBaRhlp7/HtHRodtgHDGuXDD+xhCAcC4vAZyMBRsmmNKfvBUvl9HV/gCIMUqA/4HzKq3hzxly/qG
+j7/BoLP3/YEHKHN/RAfRs957Lk80Bq+Q7EJhyyUPpYNCZXU7e6cczbIaTfFbscPrJElv1ejddFnc
+iRqy7wsCtoy6AFy9hcdChAPT35bUkC43A+KJ+ohd8q0D7U5EJVnZHkG+bjMv+NKF4RodYM1HvhWT
+Hj7z/Rvu+Hyq3bomMfsymsGlWq0OMcTp5Rgnv1xhm9wik9+gDoKNT1E9wG6ymDEm5rRc5Xa7t7DC
+usP3hfklZvLecnqR8u8WITmnfK2kWw6qZtc/ZMc0nZuscKtHbA05a4HpIScOBITD2QWgBPJX7468
+TQeixETnnVyG0U0p5Iswgsbrp8YrkeRgUZEgkMAoXcQDaczv7s3YCgEBlOdt69suzKdFOMmJgSZa
+VktYD4QPwRx+M55txCn03QXAsvJHCOabPYzgej72B2IDI7ybI+kR7pM+QzasdCIfwMQffPfTPWnw
+/eV/f471CvsocbVj4D0+CLE+sZ4Dztqpt8QHQ8g04N9Al3+SkrSQNGvZNvcvAAzWR8EXAPwV5HyP
+PmIVcUpzAUBmIOBHILwbiRZBQ3AjLSyT3xNqdfSLEOJ6rMpTrNeLVgsB3Ay10DECooRi/m74wJ28
+p1cIL0UxfaDDcRryAgxDcIg5N30/gMJnHjj5rCyenl4AIsNFIP1YDCbHNCsdVInscmhY6CUrj+FH
+CZBv79yW16mvZPycXslzftsrZRjOduTUE5eN0L4AKIgOEfxRVyATW2RQKl23CQNv/AWA+5/OfVQ9
+uf4XyRt3XhleZiqIA45PnTEGO6yTfjj+3mFilxZFPEABCZf102pc6OVgibOuocVfAOR4l/jbi8GW
+eNf9PPsFzFmoR80F4INVG+aBaDwLhMQvDLsAgBT+fr0AtJsSxi8AuevIfrLkBiKeBb7Dc3wBuLQ4
+iXqskFWHKTIbCVN5uE0qrtI31MLEXl9AzxlzI17UvCeijwy9FR+GLk7UC6gB2aL2LmPPrSGAoZlu
+SunvNzItgrvCJSH9T+Yq7gvR8OPTedeLkR9cmG0jUqtHePi2jnrZQEtLJr2k8yG1/Zs/7jyBPAgF
+byAC7CFfCUU4av6lHKtXL9LOjSHRE3GiE9FZB8w7FwC2nigiZL9KQaX8MfYCcGh2Adh22K/QwL+y
+NmsWbXbwqSHrtsTaNjyNitynLGF0omyaJqygQGaQWdAE8zIF+paOhukF4MlhWhbeVNMKj4fLjAl+
+1bPkDH64h57w3eQmh07yI/5SXQaCB10ArkmARFiFtS5Fo6vnvN8IQ5ZbCwoFkSkI4wsglDZhzqqt
+a/Gg8QKQchmE+wVAnpRBhk2SXvBT8iKXq2+bp94b2ZuzYJfHIc9y3lmT1ecgX0gXgJ/lZ5AUG9Sp
+FBnqycuYXqKbGJy+l1B8W+mr9bbWUGY7rc1d2XjXrANOTYe4Y8kdTDoraHVvGd++GoN/9OrcmmRh
+G7ZW9xwPOrH79ZuWEo/adFr7JkKPJVaaL+m5C5LzKfuRhDgYMDTR+XFC/1evpgR+wqeUVwLqC0nu
+d3m7R2mdGbjjIcAla8W4eSsL25jV4t4ftF52e94TNkRtvMtVw17JoaR6DALsGchUFDyPIV3OzHTj
+CwDnZKR76LQs4Gvnw8vROUIR8GwFLofFDRot8I+pj+9X/HZ3JV4ACKf2oEMhkubl5ki4CHcphZQV
+XUoEpXlLXb69bl+N23ozFwATDBm0egEQgQwdXKbibJfka+ItONOv8bcTZifjG8z8NeDFadkFQPTz
+5Xbs8lLRF4B/Lk9HypZcmReUq7un9y73/aDpIM1ub2yVLs1cWJ171wqZwfECIBNQdgdijmjeinFg
+ppewl257g/2J+PFUVj1TqJ9nvWuVZB2PLYlWuwBk3FgJe0gqvYxrHHHiuHZau092S3ggxy/3NL24
+ZN5mO0cV9oPDbX7tR3uU2LX7cadioWIwT9DN2wL5HUw/A3RWPC+nkCHmUoU3aD06a5PXng73ihcF
+ld5jnPGDPiV7R2uTYCR6MGytuTcLZ6wEITRbhIMkLwBv2yp7L8utLQ7B8vZHXLFUNei94Km1I1FL
+qUyZQTMR+zRNnx+acQHY1yZBUkg1pygH8s0zuQtAjCEv5FKqvN/JnLbFgafjEMBTb0GCN0lSjE3p
+1hxq/vYFoILapPeW31Qoo7/J6hrxo0rnlmSJr+y8GzTLaJCJtcPW4tN8/O1GbprylQuAl2/3c8R+
+fKI2+njqSHsIZPljQY5YfqIi2zay+wFT8rIW835urmpJB5LfITlgj/ljtbpl07srearJVnSZL10e
+kuQEqXIn3PLyqheAWNpwdTIH6ITZkuz757JoBn/mihXWtsI7zJT0zMeNgh98wOzI7MUvwR633jix
+HPK2rxS9RU23cJnO3CACYl7y/TVUNWQu4HP+6bjP+UdW5jfcYmaa21GTJOA2zU/ChL3Q5XA0kTCe
+Z5gusjapBxfPGNKZt71wR5BnQcA+/eNHBiHBoGm+LAmKIUru1wtzgb9wF4A5xisQigfeeGOSw2Z0
+pHl6Vu4ts0WWxIHY3hlHP5CF6XvTmdMistmldDSUnN+MST6iSqMkzjKZTuhBjMTqqJxEoM+rDW4E
+Tl2e0POPKW5+A6l+GWhwsNvemuuhl3bPRjkP/gnT5SqvfVwDfgDK/csCW8aFVwxerrLfHU16LtMR
+hzjtCciAtme5sIb77g9dzbRnexRyr4w/M3AJzhBbIfjEO/T0YQTCTInyAtCXB2abY8TktwEJ4qCH
+T1xFnISHYleiCDWU7tZbGgUbYX/P7OMvZWMXQ7z0SFEXAHP6RMe9fQNrxB0jZ56FIcgdH0LdgeNl
+au4mGQ5KmVFeywuPG18Q0sWhs4HScTZCIfwqFwBmufyr9RSw1X8yba79cmI9UBsT+3u3cKcnAUXQ
+Pam1YHw+g/3gUFcPuJ2jC12A4uO2PWP+eSxZKR820YQakgv3Sgle6LSMIQz2iaJMybRZ/6YaNBQ8
+gP/EDSi/0Vz7X6px7X+oBs1/6vgvqnHtX1Tj2r+oxnUPSc7HalevqP/fVONa2Z1rrW+4bp6MR9DN
+oKZ9r7FE3X1LkdUWP8CwdKw5MUl2sP1vqoHEmZ5PvnYHIMqR+DATvrCamZo+xD2wg09oDuQzR4cB
+iGfKnOwhiTqSuQA4n/+A0UQ2n7K2HuCJ785LKlfBy5B+Yhx3nDhiiqvjd5v2e50yPrDj3VeNImJ1
+4Ao/p3QjlPRRxvVT/qM50QMKbIkkqdBBsxayCn4nBObfT4A8o1zNIk6EA811EAOPEDLJ73A1XEEv
+HMQsb6ltrWeEf87VNcEI6fJ3pxo4gz9wV+gQHUSHtGriMN/CRy+XRRWCslHasTIv5VrRiB7I83wT
+3b/8ivJyXS8fX0Wv3oSWohBZWYs/9ZS4/MBjsMHTCBwxAk109zi9FJg6RVpRc/fCPMyPHjy5FLWN
++oJukFM4FQobOGsMjSK+RK+CV2PLS+DNo3lIHUuC8dHmcWuBeb85xFc7p9Wyr13V8sfExmsy18qa
+0LVL43FEbDXd9Q2FhQOjFgmBcuOWiXTDvgHQ9LR9iUosuCR9hJbmOM3ScNaa+AExocsU7sfEEclA
+bT/Zs+LC9Lx/RN2hA7ONweUg6klfiRFRpFqS2Ih6VDyRpwgtHnSvQULGaJLVqFYTuO5EnjySCUXO
+ol2iuvpL7ItqSYqgNCLMFxcmJbozMtzPATLDvAr2SzXHBoQEmofZy4xEjeGoUumENeq1WHbC+K0M
+gsCSQ/uhNF6SscSiXSGGqpuguiZiI2ROmLxBljzxRHyxOWYlSO6HshAgQwSw4H74JPbyg2i7kZ6D
+PPIRTRThjdYGrocvbHlsKnEJG4T+4Y46jTiBBJYavZ20U8r7VENiYzcp8+W0QmkK7Su3IpwtTy/N
+beO5Ach7/noilTO6v4KfVlhkGxyWhMlI5sUv621l8y5mERsliTBcCbaRrVB9ZaE/NcyBfvZlgE7E
+MWaBEhfyJLx3jLZj9DAWEk+KImURR8Gqwz8wPeQDh/jxLLLxtiL3KOJZJhdHeFh6F7r3ExF9Ducy
+y6fpIR9JmluunlnW9G+e6Fag6i0LKZdIX3fBiBeZtei0M5bqjgK4dIkFevnQpHf6AhBUM35eQABf
+k/wDW74AvCCwbYuTHfF67GVmwsVCUb17GuKpbekVbHxjfI72d/bDEVHMdhCbcXHEtAwwjgFMccQE
+f6Krhf7D+fK5Z5Xwyc+MEH4eov8pzcdKkn2F4PpL+q5dtuNjyDQxpgEz4/oJwX/vV/+bcMfLNaCU
+7oYscD0KpIAoa/CHJ/H2J1J+pagBHTFss6aN1phWgsBdu/uhkOzyWNAiZm5s1T7OKkQsrGBfl+gw
+itYoLWrIZvBf7PU1hnMkzbodJStfQf454yXpHR+cUoMdggKVYxz/tBuA2OAxO9SYnfOuaUfYjy2V
+g9GkRhaMKTMp8cZVEE358pjfR58SzzQROSPgzOOy+nGHBgftLdeoZRkCRFDLc/achKQoDyW36Aho
+RPy1uG4oOHau3bkBGVf/VO5+mnS592WNNoqJdi/FsJl4XNrUIBDUzAEKcN0UxoVpoFBzsMHdg1B1
+POl20WQxyapiahxzaG0pL9SNFVrRMHExNt131VIMS3K4tLuWLrpq7hvLhnRnqvact0+Ps/6e28X0
+lyPfM2QIfZcTaFWinUTSOp9nbcdsx0BlQ6cQ3o+njL+b8IO/gp+Fzl7RYd/6OytTlJViVEmp8UYR
+dTKKerfXcyBOddUT+wtKnyhyEh0Xtw/UQ53ElHyDfEV0vD2PJhqhGjoYEW8uAIFROJtLaRFcVf/9
+G9ZJPoDED0+eRi1su5xXo2jUeyqHcjkgdbd3a5KJKTkh8rSamg6fQFeTpl0IMi4OTXUvEanrlxUa
+2CdZmjd0G7uy8Q0aV74elDdIVORd/d07hCyAdkieXCc57KYQMHPog/BvCLbtkrJQ673jY91uF+zZ
+K6NdVNpr0jUyTSZ1gjSjLRJVVAodSIpLTjkguxVryYBrcMBaMv5I+YnNb80LwOuJmqyzVwL3ritx
+30/haKc77jrGmK7MIGdhP8K+jeA9TtlAx+qbMqEml9ZyEfQJwrYd3rR9nAb7WbPkSV7A0zRRA/Kr
+kVOV5zP4m3bpK+S+J3w1mfUT8GDZAAlDj60ujWPLY22/g8Ihf4KsCScyu/+/fuY8Lt/kpIgsf6QZ
+Dqw4k8LR8z8PjoJz97tcAC6DH95mMgiHTTksYtMISQOn1HCOd6d1qmg9B3g7ylIAKrgOh4frF59e
+pm3vuJ29sIE7YkQErreFfgQyotqgCQcN1UCxh8whqS99/AkI2W4wJJDmCvmqkvlZnlAaSQD1Lqg/
+i2DAeCSY3akJq9AUXEcX/yU9HFO43Ohu/cw6CqyscOZI1tRO61fiAD9zmJKAbMd87aDah4QgTjwu
+AIs9A6g1y70N4OKgcgo+fBiRS7xKmo0QPhz2/NOMqQMvkbvEpFfA5c9j8sUfJfF04snobVTnTNQG
+IqbOGrHa0vFz+pWdvPseWWf7NsJTX8WsjEKrbICJWUbG8zrz0mqX2mF1QZrx+O1dyLjVd5c4lDj8
+8ZulR1wiFb4h2JfV7c0vQIKdREjVPrD/IHwkFLYyE7PwioSomqk5RUxMiYsJ60C/50fL1ZrTmhud
+tX883EbUzpq9Oi9ui3xTBZkml9ZsZ12mExdu10hsAu2uXQA641OgNXsiEGr1dX4e+MjWU8g3dzvh
+5Jqn9vaO5Xl95DH2rPFV8lgz4zgL382bHKacd+/HeUiR5kPqltHbMeY6+aNq4zohZVlkPTHpbb7S
+EtD4GpEdC1v+TqcL9hcXPc+kVgqXkGz6dFo/DjqVcKjfA24nLXh7N6JXRzXKzTX9xR+JtPPWuRUV
+YWWJUbs0PVuWh27dZ5N7vsB4fCjdMXz8Xcgm2ni502qTGwESJEctEzLDHZ7Ha59F3iKVZoR/Gzv/
+jkpTRXj221Iaxi/CqLNlhXG48H6SvVuVu5biMMOmBeq9scyxHpLIcwEovACMhCJsVsiHBHLTGqED
+GNWqo1bGb26+1rPEEedR0Vq+mEFReY7lYAiNVvQvQ69iW+gjP8Czt1HvfPEwPeRcVqaSv4eqTMWH
+2sQQE4SpSaU+lftWjdtl3v+ein0aVBHhoyhKR/CPC7XJm5kEVbTZdyTKLiX7jr35KPUH4TKI49qa
+A02FUZy319IWrem0HrmN9KUpKzR4kL+SuTtriPqhedoxs4sEqVlkFAdNJyQGLxdVkciRZG4YWocS
+UYLiQJltLz3ywb8hdQSMe0y3jOdf0hHLXfUKM7pohwEJxCpa8muD1PF9PGLnTA4rOlxw9gHhvX8B
+uCdbZITrbkA4Qx6uX3piNrtLXb71p1zrla+nyXVh+Hg198eCxxF7vbs1q5NTkMfqNlm31OhSoOPD
+shcA1zOi9nkydvOP+9of6dHJ8zeXRSHRtb5ymsm+Damb5CFNg4ER4QMYNn2IzeIIv2wV++Yd0XFZ
+p8iaZEdRZhmE5zUYN0RAooSF1vg2OMcja9//5uPlMVxom4cVN8ov2p1mvXcI1xrZVsFdVwB5UM3J
+4WSiCbyi6MqJ0MY6iIeGfnEuUjljsR+ggBoAAWO590sLkKvI37bj58mYBoVjeIJ7Of4RN7dd2RHz
+NnIH2Fwy2hzXEeiP/3qvkZabZq8S6JGjMCEEYlWI4WDzqjEXJdj2nZfB9iG4dx6nmTkNw2MMY4zm
+7vvBFehh6vBYlVOl04r/h7C3imqz69a/QymFYoHiGtwLxYqTYsXdKe7WIi0tUCDF3SkOwb1oBSse
+3KWFtlhwJ1AggZD8eQ7evZ9vfHuM9yA5utfIPVbmuuY1f2uOtcBALkrZIUh4MdWNxW5QUiJoCfgb
+HF+herfg0DafITQ7K04mBUK8WTZyjk54tGVMfp/4ENuRHg3qrHCCMeuHpWvIYBg6Nxq2kNxUXJPk
+wr34E6GAIZu5bTh4c9QTAoxXkovXBC+66JckWhjOf9DmNyHRHWsGfzZz2P5BkRjCuemqIgtFWcXX
+ggT7b7Sl1EqTUx9bVlOm1Iv5zbdrpJ+y/qgb9IgyB8zaoLSi+TSSuibo5u9i3KQ4TTKIzmFrkSBd
+0e+Tf+fmvm6KrJmZql7pPrvwJp2rWUnn3mlA6VFDOs92Uek4YGa0Wl/W7D4V+cD3QFfav+u/Sg8D
+jBYWgjRq+naBmeWl65VOLAEHl2I1Ig+ZVeAqRHTc1jgVxkMqJDzW/Dw/9RX3jctt9OYWNrt7E6qV
+dv2lf1Yk823ibjKHgYHb9wJqxpn83Ev8KmmcG7aEMMQzu2V3qwXiZIahFz73wMy4ooQNYUKqY+iq
+owbg03v+pEx/3XuEDA9U2bbM5Tgykw+jnAv1Ikg5Mp2+plTzGpO8Kd24XgX9QHfTbP3yMHOJU+et
+uffFPUGZAqOUrz22P0SZJcHzhtNa6YP/ZeXT6B+5wlZ00lQYo4Uxjr1KofGA5tBXnbo5d374Lom9
+9UGBe8v4ZA2t5CwzKozWTY1y+SdqBeW9Epm0/Xc3t9VMUjWMt3lTZ8Rfq3pnZHCyRN6fO7RXVs2R
+xjGN2yCwCKbDWfs8kxkB4chKM0wdCjdji0CJ1URQ5k6LZYCs5EBf6SGfh5ycrPxFac+BqcIP9cWZ
+4ktBXl5n4TVU1XAmqksahVI8uApINTPDwULaJNmCRpvWLMUoiK5XrPrMv8pI3MOoEC/8V+Who9on
+uq0bwc/iDzm3FkY1zAfoZT1qevXmZiIBrkG/VD9/RoCfknJFwutzv/NURqfSS2vk7y9Q8vL/tQ1o
+Fxru0pMztssxCN0zMYJ67s6WY95tZZVq4dFjZs4dqy70Q0941VDilbJDqe7MBW3iexhqN432dIM+
+YncrJjdFEoIjg9BLORJ5T1RwQzFbolFf+cd6BcMdU/sGg1JkFmEX6bp8RfenVHRF95FYrWVVX23/
+80ey4T+7pDiCwik/vmHSBD9QPfvsFLapW6MZxVCm0zbLo/FsYYhS2I7R+buluo/2Q2ODT8+kybev
+PbMwtLbxjyDrOqRqxd+o98GrFI/VbrWt3AsI6SuM0y+/uIxYGFdIzibx2kkPuMd90RbSyKoJU83x
+Xp9/Tek7em9Qo6rosdSjNy5biyxajak9Im2mxpxiJBb5uumEpgCxo8wyVGfpuVuRGr6u2wLHJu3D
+5PquO79u0KBrwqb240vAj6LdoumGpdT6h8ZqjjqqOvdlIJX0F1y5m/R9Boasxn5/3TVIgLVE28Q9
+5+eHOfynbnjWlsyBhaNicCZfocziTx4m9INJyWCJVPFa8mor7gthz+6pipBn7Z8veen3wZFlqluw
++Sx6Ex7dvF6jaVRzkT8QVu1kdFa6YVKsAWQ35eRRmpYXykztneNDLwPxYRFCNvHblFs6OHPRntFj
+h7aDJgGlOhVlTmy6PQifElYutfrP8L5gOrxc+ttPxnCdZMXponW9j8a8+5W1GiqkipFb6JL5Jm7Q
+qEVGDZCLZ1cbdObe54i/NmNdoED01/fcQERtbnufwvB5nt/zXIP2xk4Vn/X9na0bqQB5pi+6G6XS
+BtAHpbtM7nj+jYQ9zJ/fEN55TQPg4i7ew42Vz9VvL7uOu0tVT4CEaEZnlU/qt7m7xwah85H0oght
+C8y/4QYBDivgv+ED3D6Ch/8LNx7+D9wg+G8D/wU3Hv4Lbjz8F9wgJH63rTxFhqPxf1/w+rCKBVtp
+R44/yhp00BZdmx7k/endiYKOxdjrZjlH7biTSkra/8CNpCvVO5eUI7PpAUPQyBtet3KHnclBYLfp
+JoWjB6f1ss9rQVEHa5bBDCNXoI7kVPvYG8MOal4okohqNe1DLOeHK4eSboztDBaQZ3VEv917bq/K
+Rnd8uvFAoNDzn1aYWc+lFhcsYGjy4rQpocks+pkN9Ka7HuPaZbHiTkmWGyjQaclrKKvXdB4Mi84T
+I6VPfrLUDMw2xwIahS4fePzhx3whJKUDlDzG/dDUrTU/Bv/+8Vx22hBtTuJjlhOPCKi4dsZErRuH
+vW+MJfzcBYWBvmDc9bttdjtroVFTYPEQmRE3mGwyEO0t/2ru5LqZAPiWUI9CVHzA3tm5n41xgQuS
+DV4bmkKD0zHUEkCwSjor16wQBYxmjbw8gv5PI/p5T2+1hAMpmIpbFT586QBzhK5XvM7+venSHz8O
+6ylK/w0b2OaGubjbwGEF/yB8N0FNJrGXxGfcJZmPXikPguJC5z/67Q/8xAJGBZZasAAShZp9V1qq
+FheMdt5Tz7vpQZx/DwLD65+R2Zxau0E8MK8NMSqQg9LqO1uyrOvBG2dhzXugdFwrKKo0aiaMBeDZ
+W5xbqm8fpM98Bu96AMMzPRu7aEsCdOYsNWcXm+4qytrf8JMNLIDa9Bzi39pId3BdRfjhyjZesU1z
+QVWh8OGP5gfECrDY6VNDxbkLB6XH/KnUX2XPTV7bfQiSxAJEPrG8RYhGpSOwgGQbIqReWahUuGsv
+HAjMuvUOMYdoor3OdcfBW0yQQf1vA9BeRzwSI9WY2nVdlGVrLMIadFc84KPBFZ1yWICJu/u+Ie12
+47Ch1DuB5XqGL/sJS3qTTmREgZ3hE0Ylbqx4/GQ+NlgA2z/RseCJBfzEdCMa0C+euv+zVTCdevcG
+kN4ALCAxavEG45p1bhyVK1uVEwNlMh9KYrb/PrWIBbz73mVz81233PrbdHftvM236yIMDiR17KZs
+6rA7vi+EKTV8bfHCYYESC/hgnw/eNlMBh10E1RcfXHfehYmegp+fXUJdSv4DqhVz0l/c6ISGS6tV
+brTms4GiJFK3Kfg5emkpwrcMvTTPiWrAuNeO/LWO3z80FpVl+2SzZW2TfIwFSB/AsYB+SdkQLEBU
+T4klffjSiDX5hLaCIRnh854Zup0spDnQ0r0Fw3NUqMy9/gyDYeQsIbDrJreV2D8S4HO61Q60Cfez
+e70fdxMFiB2kteTdPnntCQA0B1FVyEdUIQKGyKZOYgZzuYgPL2/rnVQfvwBjqL8ZNn786XPTvQyJ
+7u3+TdDZc2MVt3qCBZBjAZ8gB3cTiosFSPIHBrQucjGVPRWKg0nz8fR/lK8m/JCg+25RQ+Nuob/D
+AkjlIWsMHlsQGnRWPIYBRhS952CjY01Z5n4XwstwJtXl/RQe29arIuJ74GIs4AHkDz8V+HTeNqCW
+2PfspyRhrCS0Y72bJubxDhIjpA0JhUwwGhlHifdpp5Y/2y8wOrAtg/DfQraVW3HLWOu8PeaoBYus
+7yK9mYLpCFLDwgzaqmBfi8dTewJagETe/sAC0rexAL7uM+ky3yV1q74zkAM+ONfOkJmQrx/CesIw
+h/58a3MKUVFUI04Ma6ha/ogvw//aAVKT8ejzuB4WMEDKd3VhLS+g/veTBP/EhuujrKjXZSHHgytx
+i1r+/Pp6cFdGoQYkb/s49xRa4G7BnbYv8hntc2ifSZwJeAr3B8eVX65AMfeNPOuOpTzuqqcQwujc
+GyN/N7jXMPTKgMBpVpcklOC5OlsZsxoq3yDTAZ3xhNeQfLuAUy+ctAlk/NeoN+HbLaCNB6m7P0j6
+Coz8bkJKzBCp5qniMred88a6OjkPbXPKLIQ6ocy7pckPiXsFVsUC4nF758JEb/Hi8iHT9ua/c3vO
++qDDlkGJRxg/+28fK0vcNm3w9IJjrby4cfsXvJ27B/tWz3iCBHTimVHtSJtl0AgWoJTSK57hdoAU
+3Oc9NcqNgEX5DhPExmte2yx3r6G6Jxvxv103z3LgR7WGnryFyWABsqs7E5s3GK+zLyF8iL9FrsWy
+c/akMNKmecUclJADHSj3OYtgiGDI+5XvaMiYy0LBnVhxY7gkjMtrsYB1MHyYF5EMfcuc98AAKVmW
+ateQKzLZ0zt3XYYFbKXz5Vv/xmWvqnyABUhMYwGhSBtTLCDj0/NXutCJvwLM4A1rvqXutVsh5uOQ
+F7eWYKqXm5gGlwgPMzki58lYqHhvM8et6HXjVO09IEIierv5FUnhi4GlSwNHeaO0ZPLbBpQuDdo0
+7iAx0PMNhB+H8XZRGmSoLCrIfNKN6aRpI4DLbpYvUrrr4MUB4d0XmuEE/sW21kaO3ttzzBEYr2OJ
+my9DAxUaFGIcX7K4JnLMfMQ98lpImbt4X3rjRvO8lj3Kkac2wwLOKhunuU0nhUX1kofF1TEBXsbL
+DPYWDN/H6xn6bwUl1S8/9aT0qXVINjpMmk1EUmFsOnoRZh9jqovANybyJsDzcywAsN+6Xu7U2QAe
+Xz276QTxo98lz17Jecrx0IPEsjjqgn/GHxeyVl3YaOxkFgRFuu3sY7b0/95JpeePD4kNd9Luwifv
+GdHorovmtGtcRNYEoH9uB9GgudEEWMCFdY0mATLYmDOB957fIe+O9CbGA/phYbizm1pXIJlK9KNW
+0u11pxEW8B0yeHonBCaJql1YAOdVHHrqstbdq1mQdea2aoLDBha1tulPk+eLzodMPEtd9EBnvsgS
+b6VIKEpb7Ba6/jHsBjJFZ8VV7L0WLY/FAn5Us2epqHOGmDB8uF14HL2sEnNoewx+F9GqCgtpRrWY
+Hb+4e8a8iAMRWPfWYfSXOSP976ceNx+xgMfGNLY6K69i7rN1A9qDF4QBNMsOiyA2yhP0Mezb3Vpn
+em3HCUd97LmTzxtztN8MLREs7w9oge21ldgex1bD9aKVb3J2+2oqqnbpMYaw8QoLeChvbLOGajGi
+asr1lYwf9ZjYhBRUb+JL6T9cqmlCBKRc19DaVDQmb5UwYMh//bPhvjr81O4HELYVaIgTXBKa2mcM
+zYXg2Jx6YEiMHLqRr62uMpURl4Y9XXgq21tT+Og8MTeLyXu8U93z8/T3E9VGbk12JaZlhfzg/Zmn
+s4uJ5WtoU8ZuqvZu8SzVhTBzzrq5LCyg50cnZHAi96sz8GU895xqe+vEXvEh2torMuQgeQL9g2gR
+M6PqrYhqmID0MPTfxQcIqUlNKxYLOZVAX8VxQr8AMZmyrvEcgXm/UV66O1hA9AeDwvhpHXxKqFgd
+Ny+bV028QmkAoaCSyendmrox5uBHZ5wfGTmMOJ3j8f0pHkP4hyTvmaKuNOogZ9UEpo3bbhKU45kd
+vyFfMNJ3ecDvENQfcgl+eTd326Mg8Jw1BjrxG9LHgZ8nzB5nLy7AESqrkpWFqm+Btrs2Ng7oJd1D
+D2OK3RNnCwpPmvmen8UhHHxzbUS3XNLEyb99wRGFezFH+0jkpj1bsegz691IEuKLWrUtFPheJw6x
+HwgBbn9iQczpc9L/OB2MRn38QEnU5TFA2raKHID16i7nAI5lWhymxmeyNyZeESMBwy0sCJK/Qvvg
+5O3tRAUMc9x9RmnTqG/NID48Dh9Za5GozkVHhR+LKrMHN3xNq6jFUNzWaltkWOyImWRqrcnEnUv4
+FcchF3ru4BhyuXrqkne6dFtFRfcQSnrnCmzWl7EApu1U/8qb5lNmZhfe4a97pCcizji1Lo9E3ZVv
+mjRLbYT0zbcfityAYDbcCHN17o9xacwHPDe5TiIihcC4fsr9chgoS9Fr60+ojYxZZMGFUuQD7/dB
+4Dcba7xvjyXF+G03ySLwN7weu3CD4nBvM6KVW5G1F0775fBrfc04DveNrW+oVXgT2YQW/SGVQM6d
+2uPsd+1iAfeg96LjQ7mXnrp6+xsw+fg1PIY4+QetdElcMEi0Qt40q4z7fJPvcVoNyZMfYBfYhqP4
+sQAc2E5QXWfvPA0l7Sj8/NrYHKo5Y9alEmK8t4YF+NE5W9BCKQ2ehsV2HbuvWC7vms+mi0wUbnP4
+CFM4+qVwfUeDc2WJOxdHq7IEedY0OS5je7zFjxxcyt2CObEAjuPtP5aEhvU0Uj1Grxw14SUqLwkm
+q1kQxMtc8b0YIm4bgDvChVaZX+6legp1ELOAN3XiSkYoyG+65zaeIjfZ7F4Kftj8jdt2VdXB/Ymf
+bElCCUWE66rx6ytBtWWcBlxBRmouHAirSnUhyrPyNS8+elHH6ZkP/vWCphpS4k98/5xve41WKz2W
+r8EpIh7Z78aPlKxZavlY3WLiWE1LRazu7/2oOHTiWiIcJUHr2C1q42hzzuwOPbouf+ITLKpFyd5n
+HJHm8q0byT4mK8v5YPqZHGQEz6xf1ni1dY0vnGBoAQuggyPCfKs/McKRkPV7cicfJDTYb+u34tmG
+wn32U5Xx01SPP3oTmJWdvnvL7DAzgzBOQ2iczNGH/LAei67IdD5TDJ6FCz73u128f18BCktlZzEl
+3o+jaZ3REB60GQ2n/iIg58emDlx4IR8UZyvoovvWljIcnIs78kD/0/idZ3IYeseSZt3wuBIuYqdk
+IlpNXrKlqT4sAymHdw6jDWcVYpWnW7U/nosZyLH4js/wjfC6p9NnQ/BeY3SEgB99iLa/PPvuh3Ci
+rnuswEU8fGfrPyUFWdcKGPCrvCgZeRB1zF9/Tjj0qpfy1e5suHPSXo532n6d5FJq5xjeCQO3bKGO
+oHFlWqNeP2UEacDaBFkz2Twl7oQwiZ7Gk6Um3P7MxhNpdQJTWHn66wQWeh/efOb3nkYzI7sVpyWL
+0eTgqG4CPPJHjmA+5yZRyqehorKqT91BT2G5mw/klkKyq2H5n+YTWro5lGb9nOYmyFX2vR++kOuw
+5/G7wkdtf8nHuVxgz/Ftnuqg8JZ85/tK2J7ZtUBSDh+voNzly0ecgeNmtDKYwjkc/vpO8ajUSBNf
+DLcF5M1PP0Ega8qymSyfp6mFUDpw3ZjeY60WCZsKsWtwlUUpDhm/PTWMwgLs9Xh7OLopkZsgziE4
+hG3te216ilLr+5Lj9fwmU5/JciFuG+TL8DjGA3vFRZFA1S4eQ4fLl1Ifgel6oU8oJsbpA6OB3Pp6
+6XP3z6TXogpOaXRSJX332bnSnQMeQQhlKDkI3wqj9g+inuGuNSfX5semp67TNX2meKV2LJsJ5HiO
+qzqxsrayAjFUerJLvIYREmfPWvAV/bVXsKjnj09Yc+89IUsFMSJoOt5X32nIKDz778fgQB4R59U8
+dv2HswWLRtLqyqry0LGJp44/eJEjryLOs2r5sgHShSG1u/b9K5yhZRSVO3BwWCR7gk2I6ouJj5Rd
+emUpeFl48MFAzQWmSk5ilto4hdSJfn+2O4GP8eR4v+3TgDPNnoDbWStvVIF730AkfWK2xDLrl7O7
+/3DWR/05emaepevq4Ab48Vx0bUbSbqSCTG2EhsyT/MWluuqn2Wu/ZFRQvP++4PUhDivgv3ED3L6H
+hP9LNQj/54LXh/9t4L+oBuG/qAbhv6gG0c9ndL5TZDj6/3fLBmEVC7MGAZmfIpmgPITLo00KWJpD
+PxV1GOigX/u58U1WPib6P1RjXKPcDn+x3t1Yop0c0OGEbmvUU6Rq86TsM1J0kjgLrTYbR7INdGd1
+9Kpq95XmvPabD48IaDvdEQh6yiZGZ4fhfTgHI2Scb61Q4nnflKB5JOoydXiumnNPyvIX2yP8SAsD
+Ed/n8/FicmOqJr7eeiyh0RPWr57rwazxKT1Ep2fcWfP1f8zo2C5ZtO0WPIoBfEO4UKQluSyfk8ku
+U7oSPBcnexQsdaGMIxSXJHluyCTn7soGF4EcGYg4VfTG2V6FrBlqXVuDbRCAJyWgZFMv2gUznCr7
+WxoQ1zgh8Fdx8RCNYXKtzO6OG6NxaC8Q5YOfcvvRMdHDhKx4FPnpGTPe+oROpLQ8QZSGHu7WbTC/
+qcjQdFM1sMLD88gM4IwXlNgiFA0PgzerVcltzXgKDMEL41Knlqy9JQe/s4aP+tcrddJtrG6vbRAN
+Ud9ffoIFCFrk0x63tw7Iyyzwpo0z0sganSVD48mZHYl1iX1MuXG9FQhrXAo5dQU265Keob///BbP
+ItUmcdsbZDbD7A+RsrNyM43gMlAXi/Ecq8YHiQ+l+AQ8uP0L/Vq8BFzW5Hk4S9Uryli6HF2iVbqF
+0A+gwmPGJ8iCPmZ58IDGMm129+nfPLocUdFkGFcrfUka7clNS93pXEnVIVm3nwje6cyzWO/jAZ4k
+SWrGGmXxc9w8wFz4r3JR3Y+5be7Z+v1ZkfER8L4ddUL89I8RlvRXFiLIGuINR9Q+9PHbGlP1zhpP
+Zkn6KGs3OGLmdCU6JsmVYWh9nt/oc89TeLPFn0UzceFs/cr7XYI+IddT5spTGKOpvsvyrYdZVGSq
+hhNsGviMxraZ4mHuYBqi++iqRo1Ay2a0Ub1KBJmiRoji50hp6aDE7YrrCloDNacD72i5z2eyrw2c
+BnnJCNX8ZuPJqcmTwPJuyQRX5/WpknqPXFXUpQvff9pWFLPHAtywgPr4oWHQRUAbsjN5lOhpTX7/
+UsY03p/RbbpfI0Jls4FgscD3BFbACa3ghkvMsegiRMOOpRexd3hyfHRzpw5VzK80Xscuc4/Pwy++
+20fF7vDQ3B/QUkQMKM1KvTL2DyQa59WCvFtUzLA2VkMg+Ug5dcKTJ5zzkYg7nwqlAvTpcDVm8RWI
+Ss5XS+aw3lIs48IAHhf+dTg7p+cSqkLjf1sGA5zyW9P6JyhmtHD9fu9MZOdSobXPbuo++jatIkz8
+eH94vjowr0wMOHKUIZQHTjxA24cWXKn7edmjyq9sl0rhjLn7FD/OlQLo8rsf66XepJ+aerJLkyRJ
+dmRWcta7TvHQZf/tKG0ntfRUITg67vOvLtVlM/9tED8lXz3Mjd/t54w7oc7srwVc8Pv1lFmqjvgq
+HnBbh2sr2Kq7nFqSwQhI12KpLqdhvcsx24PIXjNRvJFbweE3FSz7JY4HzwVomU905fxljTlwEt5+
++ilHSlgpO2kVx1onQaxfyE7ZkkBk5PCHunwyoPKIwpn+RXeLGD6JaodYZh/Tojsz/YWov2b4gHnV
+pOWfBKCf/uuRt+Zd8nzvqR9VwuEAoe37WpiGNtsh/Li+yogl5c+2W8+n0kTPYGIxwWZNvMercf3k
+6y3fKrCAcsm4Z+ohpBRT7TVi72tSdE7I9WYHOtNBHUK/fVwyKdRyB051E84VMYCSnXHNZ3g7TWOY
+Ku7eK9iTzb9XDu1eI7qEUawJ52CqN8dYgIYHPicA//rUsvPpZRLwTUJiqlSMIMk9xAWmXnUDKcSu
+PFi0v21SpBRxIytBh1S5NmVm1I0LX+uL/YP7saNI4scXOw67YJvVbZS0hGcK9wOEfWUBLkyLzNba
+rcSWnMHnEkLpraQR04Uf2yn43c7f7ZAvytVxzgsoKiC5wmjLCsIxEsWBmvgS9YT416XjxQn+DqkK
+fUOZfivGTTEDJt+fqu9QhQc2GP5SisvKrc2T31tYdUzdSBYPNy0VnUjscQH287JsBRWCTXSRkn3g
+GTu5ykG0yGzbqRFxaZjRqaH54kgzqXMkAgFbae0uv+T7gJccutXeedocdKIKY47ViyBo0RruvX+B
+BZBT0syYscC9fi8vPBwQeLyx39Sf3m18RBEhGtf0fe+blB3cyY/YZy6I8auvL26pbrb8z46bUqJo
+XYmUfbODfFXom0ZjYIwHNIfx55NzUwuOI+pxoto4JetH8TSzdcUPkGh9G4lRXRIzjpXx1sauZ8Hz
+crz3Np6VMPp83rwrLhziSfG2mcdEt7JD2A7tdfK/yBtdxy4IOSxnM/9clhQk5nFaWMrqF2Q7rBhL
+coh7twAWdCdc89FJlmBHHDd0HI/B7GKP7IibhiL7bmZDu30SoEsXAUkAUi+zkJ3jiWz3t1hAZOZK
+NxV53nK4st2E3KxCFsgUoJX3l8r3nmaAvxF+b/dWIO/Edj7KecEntUIZqv6Ma4pPhTv7vX9z0foQ
+emE8pegKpbt/dTFw/l3/4kXGNV9poTXlqMLANJHvitLCe1ZYnZtGoFDxlR0kfMHQI5tiwQSy3DXZ
+QBo9hozrrqSD4tRz94KnBGsOs+fvBObxQ0lk2Ueumu38TrPCuh+JxA3zn0mSt3RzpMjFVPJ3D7uq
+GF8PQN7F/nwcXCI3P0kSWVCagRHi4LYjVzR+l+muw/hFOUUGL5QuH+lnT7zimlz21JLRNJ60Mrch
+fnbLyrLkTMlb8Qrec07iwYrz3VpCFJe9SdU1b6W3WyXT15Pw9D4WIO8pBSU2SrPHAUa8u4B5j3BT
++NpJ1ygnwRLVP7zjPmri8jL8lnlVeFqWJwzkW+qs5QzUHaMq23HpeKTIIpPYrCrSfyX2fmdC6bp2
+HkQ9594IKmMW6H39nW2iMLBex6zSJa6bYlg73Xu8uGFhlj3IjPYTAS/RKl+7euNGTXHysh6z50qh
+tABt5YfOomh4bAoaj9xO3dZDEt4nSiIEPD5lE/ILleVGj/w4XBBQsLyyvVJjHR316fgwDcQXYlp0
+TMdXdJTz0K7MsDqdk0gpbaThe0/3TW5KWAXfvXz9uLH019DCdOvO1KWeL/D49jKHEdyxuiUkP77a
+lPiGKtpo9QUrd+W3jicuW13sLkALH/+iXGvoBn9jxBztzolnPkt/aXKdq14ZQi+fQE6P6m9DYssm
+FmCKf5/luA8YIyj05HsmC+LxQED92wiR0DPzVViuxGO9aLfGTZwmuptyHBECG/B5pHazIeBNkl0i
+QuIDN0eIUYfvk9n2gIrMeyEhq0bsacqq9ZzNs75WnYVVknhhGDcvgrjSvZax5f0xPiyAMtKZpum+
+NWuyMd8pHpHgs1BblM1he2yan4hJ3KDlzYWTOefHSodQv3ap36MgpMEg9LzGNV7ZMR/6a4r/CguI
+7p80ZAmuko64MTvHR/v4WJPg3v6YmnKr6f2u7+2PrOgvH2iB5aQvsh8Xh30vM+4fEeKGnl34X42W
+b0NB4POnoExj3M556hn2Avwazz1nLRsndUeXc++z3meyBbd6sckxaLNvO/37HfN7PHiv29dGytA0
+yCIAmLJ31U/nVVEjj6z5Q6Q5x0WGogPIMJ6+fxK03q1v57n6B3//44MnXHr9t2ZGWADxXCt3Rt5d
+lelfSrKuUz056fgdCHMlfDP9iFSd5IDra0hDpsLW1yeBZMYpqNoPjdoiGggJVnncoYs31eXx21yN
+mWEy3t/nUk+s/hL8prj8/HAfC0hzin08flhjOHW1qOyvkUjs0bOBBRhrghHfORXSfAaDDMbtp/i4
+D26a7AhQImMjSHh8UOQkuIP6rpQ/tSwlVEtHleJDLwjvme4PdcgfzjhV/aaUC2o5t+0xywYMQim5
+eotZ15kzJfA5hUVHLuBNXkJGhvYj+/UzD2WLXaYLejmnZOfrGpSeezGJDY22Jv/BAn5hKsosAuGT
+M7pIzmJJuasRCmRRytuBMdiWM1fd71rcX1jAVpjEOyxA2JnnBDo8C4sGpd/yihH1sBhInZhXa0pk
+xZMyBq1wa6iKDVumeZ8uqp0LnEkeLLwwY0G2VfQxCRvwn9E/Cu1/44WZURvjZgp3y9gbs2siUeOo
+0lephg69YzBqVB2TutIhugWLV4L9GCmuTETwNLZ5fMUv/HaPtCVNuXz8rZUCY8lPG86oSbt0bZYX
+C9Ym4tL+gI57uWTx4XvILEjcBqs/nfILQO5AWVAGj2COqYNLrjbv/QqoQInQNm9zwafsuSucc3FS
+tbT09QWReH0uGNwhyqYPAf5y09jFwO7KzI0wfEVRAtoxoUG6HqdIspR7K63NDybzV5HlKwZM8vcr
+FYC+pPRhAXhCz8rhdGUcazoZcnE4O4i30b1x6gSyw+YMFHvdmT1j1uLn8IFJlYAu3bMZIhVdWh/S
+M+dCTbmfUJQ4w30yh52Lq9d9okK623XEWgMnlqs0pmuyUyrW93NpXiP0BsA4P/cRnVorocFSAjVC
+D8IWpDYwIqkczsTSqlf+TAmjF00uSGIz0GsJU7iJrsB2mC/KIEfqnX/j4rq8u7rGHkSiwHkz4zd1
+Jpc/FoBU1jsHDWal0Fq+fhDtsdZgfbh3TfaS6eUc41z7bJK18qyj1E5OySNFCK6Ox2sMCdgO5hMT
+R3bPJM3Xn7jm6r48affmUnNEDmWQlZr6cqSVzahBrMq4KuUJSeMEykHNJ0YSo2G07qh5KnFumEJx
+hta0nwzAKAYbl4FFBl/Xhh17AfYv8+knFNxB7cnUXjVeQj0d5lq9iOmWlNlH4hM6aRqyfooPSJn5
+Qp5XTM/Z7RqtxlNqDLfU/JFxGOjV1fzWiZf6s7HhYH84mXgt28B2pOFbfnILZTl8IIShbQAJodLu
+SulGh9Sb/8K5k7yH9UjTlG6268IomhBey5RYMIpwT/QIDLxypAOfb+pxreAjXDFVPTEx4SOwmy52
+B2Rfeab+dPK0ARSRnGu9CrrlPaLUVN2J0/+YzdsyZaoSkIkFZB3VCGgRNUI5zCiAxcV/hI02zqY9
+P8wG9skF/+m5x/Z6aunzlpo6l9v2m/55T0P4im7x0NhpcUURGYZ+fIKoz15gxVijZYeQSQcoaWCY
+6S0bnew3PyXGmUG8yQ0x5mBkuW8CpcO0TwgPsaOaTetUpDfPlmNsHPPww7MG6pLvra99NwKnJW88
+X0ekB0bP3oJ33lmT7j9Lg3ud1pfGSfE31KakThy/kFVf8H6dTaU3aDPuedPYokzV+wSWtZuaLG2v
+eNaNeOo6892mXXwjLk0uvWuPlrjxzZsQ8Jo0bHHkBEMT27jj90eoV/uet5lNdF1K9rFABNQ9Gjyd
+HahRFkAA+dWq9/rb2O18KGHuqwABa5aCJbFGhKMYTUuNMiiWn0rqfV+dRMJJIcTmtB4PRds1z0P6
+3JOCYFNv1H8/7Cx+4G04K7eI4/xwYwOzoOziIwXuHsjZxsTsxQPNBpYsa5/KP+Zj7TbRmbEy1OC1
+Zsqic7HO9r9uyCKJyEdywHPrGhmh9oe1jJVd6ePg7/WiVSdntQSqNFG9hKHrccgOEYqtkZo4NTgW
+IPHvXg1CHFbAf+MGuH2ERP9LNYj+p1eD8L8N/BfVIPoX1SD6F9Ugpidf954iwzH7v3s1iKpYhk4K
+j0P1wdLaLy4EmT2Y+Ok+RYkbvUCsl2oYVmA5fV/+h2pcMkNRf9xXHJXOu43lGWJ3njs3KmdzRzNq
+mZ8k1AqR3S6l7tGksll9miDtjbR2tjGiHLkUsy2d5Jultezw6AJV3tL87RL4yKasX7+EBcSLpDbn
+4l2uiKIjU5eZ47l3PhIZeZR03CYLHX0RMUTKMtdiAU/mZ0N+cLR6iD5mhVnwrZTDz54PsMRLtKx1
+7zvJmvvGjCaEJtEPM8pyb9X6WjWghQ48cE8trR6YCQwz7rnYjGyPKR4kn0BMyDy+3c5LPgq3aVIY
+V4u2OQbHE/+R6r3S0NK/jJM7qaEPBg1eXNjJEtJhAdp0P5Dk1vsRW8VvC7z4KifynuLxkclas/cY
+XOogWym3Cyiffdle4srklqcSYDcrkbKEN88/n7XNuaGpyh0iQXeHuJIlt9NVNdHXluVHI9FmgphD
+lVk+uUhYyxIz6dfR5FCXRbFt3G1X0anTCtGyXOiOAbgbCZn620HWPEhTyA1H4FDxEJ0bB0ykn5t1
+c26T6+KemZlAwmHC/XzCZ7JgExyJBeAc4K58i//SHVO3qpn24uyFX4uRQPplofyrUbt2YrpUZVp6
+6hc472lzzkktBZsiQdD9M60+onntvvll7uMwXF9xzEmMO2cg7PcoFw+4+tbA7pMaamCoBeFCadXf
+i9ZrBrZCjFrn7kne5J52VwcEm7CEeDLK1iXRfjJKAb5Z1Oh4EI76JN0WxPr3bWsdFrD+LtdXg5bo
+TED0g0a/oIAP2Y18ZOy67UYsagcL6J/AAqQq8111/4hVB8sulYFkLhCl3xKhj1Sv3jxQ7aZdIcJQ
+w1bum+28tTCdeoFeIUN377yS1clSjRdBJ3wlVC3JIvwTbIAF1PjrqhzEnaDp45z4FmJif1ShFzDu
+rgmKJzDfLbsAd1Ih8ZxOg4mduEnPrQR5fli+p8bN4e4B5Qv+3Po9fHHZxaelia7J3sjGZ7L4Fccc
+aNNcEIcx/aQ6tYuWIvcjcFh15MN0zzDc7HO/xYGAU3kefw7Zro6bT30b2zDktvEctK0Y3w7UjYb0
+5d90crqfvdgd1f+5/8GLpibr3cM0e8xVmYCLmxMFMDOkjsPkqZwUsl27YHUUCtHm+YCMfxTELGAC
+JSIRcZR7CnzjpaCFYTRLDVKwfGfiumdJ8OS9Kg2lGqijBSqz1gMc/PkV7gehc3YjZZK/vNaslS/0
+QMnhyfYjOwgpbCE3V2aE3D3xUcrmi0wUoL16rW+559aYOs8ktvJJ9dgShMtes02sog38XNkgNe92
+rI9IZp5COeqJgeJvkWEaX60Q3kYt0rdnUCzgwW/JUzeV12xeCQVXBsPX7T/kH7wy1pHGwQBB52Hs
++xHC0lcaEx2qRRgnlxaXDIY2mf3+TnMtofdX8NLxR7ovHBvEsYCWZ7q2AU2rIT/+ECIrWvbrq4S1
+uiKWfPz5Epn5NYDm9j3uo4hnxyOdnp01AZbXzWYrsNK3SOHnLBNaI4tLgqy1do2d7xm13W0PTtFz
+APSTkF3vhPSPI0Htb2zPDoveC5DT/mGgCzCEJIbK8rgNB5FiAeDTknvHTCCnjdOUEqo/R54ARjhv
+mqKO/8Xbquu1jowfWvNBVN5Mxl+ceLpGjr+GZKP1hvpibmeiv/DyzA1Getjqhvt3/eL/BfbFCLEG
+C5gZTDVfctjz23Gsm5dycPw8w2fWFJEQrpCFiKsEjyj/UJjKc2Lbs2KmomAO4KgRiZkV/oneLvfm
+tBbQK42gN5PmvZxA0GukstyaP2p8J55eWCU/igMKSxk9nufRzBq35NIL/hEJWqrUkaLy/3aoDDbX
+6KdF3zmfezg6YmGN7+CJ2clvNin0vM34qSgahW1FoMZX0R2PiNaBI2OS/tw3kkWygiZDBxrnIfxY
+wOftthHEolRpWJycwnJ0OrLFTo1Zbibn512l8iDrtkDnBPY2n3Ikw3p7YqN4ZrvdjdvHScOkzead
+McwoNbiBrzSOKaOaYCdUWQFKojK1jQWE3VmwUPLfEibsUtvF9sVrsAVT880aozQjYSmhhG92F3st
++SGHc0zGfnFtmtGmprEgbuRP55qwnCyL9nITOR4SOu9Rl5o83Ahy8Q5hXOhbVypO5Oqyl1ulewuQ
+phsV2mLriwWcK2ABkcN/g4xYZ9bP/T5expPjbfgdXagXetpsFF/XKEMp4fh/xFzkLixEfHFdjNXn
+2M3Cgc+6mwuddruizwQ4dtCxAhwBthhCOi02VvAbQj8K8gRIH+AmNzWPU8raKbuwdnhF/Dbvvma/
+Bnm6/ZgmWa+j7V9LnvslhZTDQBkHVJXCtX6k5uDSA2e+4ylC2k0VdSLJqbzfG4FgziT7a3Ow2GOt
+2LJXckYy+pDm5KafE2cUNpzWNefO3y5p1lrU97LByp/7wCoo5woeyI02qOaZmSYW4C0EZQpaFOvp
+6LPyQS+BHaaNDD8Lw2WvHJd7yXAnejqg4hPGOku8fR89dFHTfpBoTIAMjxZJF3QOtq460vVufwxG
+A5M7sCSBuWDIOAXdR2O6ZqjUtOkRCs1+KvtZ7RfWL0FgNoM5ZjMveDHZUM5zwRfSU7XXGe+ykVSr
+TpA0uSU588n5zpPexhRyCa+Mpe79/va7WdY1HQ82Q7pa0rI/dfFjIFUu6VFGYQG55u+rwpfhnVuy
+vbfL4GvQNYm4XiawSdaUj0Peml9ks1Oxt73tEaJ0PUSHXmxgFX9kwWEKDsMMZiSGCL4Z91KoKIlQ
+eJQMoya64gwDTRoBkiva3E8rbCjTjH6GIOPEjzvrQH0vwvYc+7jcv+jf2PDbrnHWWFCrtW0wv5Ao
+rCIoQhuC3/oeBw4OwB5aP6SIqo5ApdsXvY9+iksndrxgPUF11Qa7T7T7zlrsJiXEjNdXtQRyf2YL
+PDTxXOvV6gU8C0FMQ9mS6VlLwFwRrZ/laPLt8VAqJsgmkChrd35im41/kFFf5Nd9OmtVM2tmPmEh
+AiKwvKUq+uEg4dW6ar1Fhy5q4czmQQIEgWdgHoQF9F1D5pJ/oukqHWzAZy5vdH4jxhvA6zyrqMHB
+fJzaQJ6OUkG+mWeSRy4VqjIGKmBUpQ0lTiFN5B95pZ/df5sdnCfsP0vKafPkoV9GSPn5I3TE20eC
+59abJqqCndyM4G0W5qV4NqfrDT2OqOomZHnExun5eywg56/p68+uOCuQdWSX2uUBzdXj1OXu+9oR
+uvGFDcC8tAdpj8prV/lvKEDL8RbjLv2ExRiqlVfFKw1SrVC09GHh50aPxs7aWaURi6xswYYv3orR
+gvQsAQItPgU7n2/K34wB298GaomSpd5ahVkyZ2+naV7qdsRkpIfoI5DVG4xkEInXh6vTjbApVmhb
+RWoyy93fTAnaF8ACfkwY2iCfurpMEO90vS9mzVSObs0bQ8GSXTBmjIci8oX9/aZrtgpzWMAX8VWL
+SDqtlh+E5/ao6keX3+cwoLNX0E+ljjeNufMRbWbMbhLuX5rI3wdB+hVV4P7ucUaranO8oFN33gAd
+SJ9Q7nfppPb6EUPeWEz9pEsHnO++LhBvrdjDWvv17d2ryirposSbGP4s3Dv7lvaWGn8H490rVJ5E
+fat4F7znWACzGxYQYIoFEPeOcAZD7/JNihdmK+ceQ1iVjUTStBgzDRZAwxhKLpvl8Lkkw6lDEKN+
+Q/Pi1cNUsClUpa9+Bx2Kc4oFjP+5UPogdFNDOmmu3tSzsxDNaOp8DIgK8S1xBj2jDnRN/Wcjc+xJ
++alIQ7cx6ld/2XjVs0v8HXdShvhT2Ebu8HovzWithVtFJYYGw3Fu0nArNHrlMOiF6aM6iXpHphpH
+rjYUd8+J9LpoGAvI/hpbGf6C5cYFzevfviLEkF8mEPiTiluGh9xpkF9Ll9mVWYBHYO6jj53AwN2L
+vZ18+YLJsqbleb8LfPKmGnQut1XIZK1S+oUmIpmymsXb5jwuRLz7gMf2GRYgXJ60AB7Y2P/kQORg
+KkUqL9TsDCdea3itw9OGH2AnFkGIaM5f6sUCXtx97qJ9p3YwVlYwBuG9+lLb8sKAjQ69sMlZYG4g
+QHL/O9lSzvY44/yyJJDOlG+p9TtY4uueuPaW085FzdxMXImr1uI4vyQ9/ozMM8+eN5PyVRRwBy0s
+oMAER++YFYmpHz7jVJDjCJ5WS/lL9MWt4lfwIWR3fqnFeQotaP6PWZ/O/87DECkh3WnVvT6UcAXf
+EI20rqd7h5kIaFW9fTucT9hVCls+Hryy42DtrEKYoqVcGpC53+BqpZE8XZ4zGxitu5R2byvOUZDy
+LkNlmdBUNHF17R/BRcMFSIxLkRJ7oDjaKGTLbiEWsP1E+PkrLzfNWxf0AeaR9/FZA+gvFlBY7T/U
+r+V8ZZUk75rNdwZuqCrznSbh8vRqQOUlquzx2+N/rA5WpFi3sDD/Hb9zJOSHM7a4umFEOfVFCFJ7
+FhPFzDWa2T5Ku5oCiwLEYAGKcNW1jrf3vBVhsUvJph2bU36VGHwsoAfXcMCad8sI8fyTuxCJxZKw
+r81ymj4tFpA/sVXzbLdRwQ30Lskzaw7jgwWwirpg1OK3yJms5mIhA9HtWwKzmUApe/AmDNW1HxYB
+6fvQEDaMbLkLeAp1z0DzgMaAVz6Zqk5T6BG1CkfaH8BhG9TYMcg0JCBrYeDXcUJOI52PgZDLq1j5
+YMjkC3sWD3/D5GvHj/uJCqZ+rMk2p9/lPQYKD45KZ6uVMqUESHfM2QbOvUIvp5ZfT2oi7mMBdwKs
+5OgOOaB5+qUYVW2HRxn0zuUQul8N5gQvQO+ny8ZWcebgQDFEbk/futsUj/Oo9CvsNy05Fkxwff5q
+4mZy2owFJD/1XGpTciq3OcUZVnrhv0ID7UbNnaDzu4wGLuBVIiXzyagn5ne/hI8W5EFoJW60k4BO
+hfL6PpHmSuT+UC237jF/bt19uFT+OGc/BT6F1sjTl+gVmD8jzJO++GTi0zQxMndhq1aIbMxm2xL+
+kpwUw183nNiMjx6RBaNZg/buQhyfKn7U8tR0iuZPJ9tN9+0NFrB8970BzjtKVlH+EG/M38AvXlZU
+WdB9y4AFzHTfvsAClrqxvwDEuDiAe+S45AAAWdwJcqRufo2oEnZOCi/oF6u7uqUAUVZRKHeBbF6B
+kWV1js3RzkwNVtJOS3jEKjtP3Xfz+rpLuWQkZbzdxGwzNiKMnz91l7r17nkhd/X5Uf/trcRTA6NR
+2rUw8CtR9Kc4F3YS8bfu9cRIh3Mz8sBO0ww7h1Ul3zjh5FmPqAK37MXvTtoepxUKraZt1G3uNsqC
+05ePLGm+J8lD1PgSvwQMhmjJ9G81xtil4L3/aULYP6W4/IDmE7AleA5fJFJUlzSQ3fLBWIDRN/1K
+RP1o5ZORNtFI/eFpeJPfkkU4FjAcKv3H+fdtblNzhmlCu8GmiA5lcsoWnkrblF8fkOiLeYF9uvwz
+H1DnXwUB8/bT9UmG8v0A3ZGp+zItqfsRjRCiT3g3oj+pchahVMSMR0tTTc9VTdKJ8MaF+C/uqjnq
+f/dqEOGwAv4bN8DtIyL+X6pB/D+9GkT/beC/qAbxv6gG8b+oBgngtenCFBmOzf+XauD+59Eqlm+f
+j5omrDct91mrwJY6RlvVtExfXDJmeP5SLhK8epwa+h+qsSsK6rKKlH16yjjs6SW4au7PDZKskuVL
+sHWlErgZbCP9cTtQ+3acfrWdF/nZlaW+T5CqJTsrJI1lMqpdMyrseD+4EaX6Jau6MMFJyjNO3NiG
+075DJe4yme3b7Rte2qcJeE9daGW+kiMiru6x04Gr728+yFXm43hew7Eg3nfGjFodEOe26td8t98s
+F8J+SrC+6qcu739UAft5pjrtWmGFUR7Fc7M+2DXgighzmzDM2j2eumAneGb8ivQXZ8FGec+9gjS6
+gmj31rxwofIAlunNJQg5Qv7XxdcKOuAWUIQ2lkzCeYQPn68XJWCn9dIdh3+Y48yKSDqFPuq8algw
+5ij5CHIoVCQn3RP05Cw+nYbghkXm6zhsp1muSPMERTnu+qY2NvvgMc7Yn5gghXS8JA5m+Tr2KUGG
+X1YvLxNV4r5ONLS0CNW1UkTySZn/jJV9M7P6fJgP9DmFvu6cvZBoNyz9xAX+jAhfhsgzJry9yHfs
+kciT4ZDP9+MI9+mV1czY92g0Mzz+lJwh13l8crNVw56c23h8/U3dGx1c84RcZUjCgFjDpVnpLcen
+CGq0lN3O+LQ3/Ep4JtMzujPDMpodbmVYJcI3w5A+vvw+c01YMBD3s2czS5a7Y9pkFkmboKDdRYZ4
+MsSjjYzQFN9Ck+HBA45gIoR2S45tmujp9w0oYUHtHGmTU0sJs6hGa7wwA1u55lo7AAvQmwRATZSf
+4LPi+7f+0p32PVOGyDDc6xmCTwfTez4P/ibG1Gv40oT+DefW2l5gwa3EUknx2qmjTQwtoRYVt24r
+HujDdkLeDoT5zpsbFoMdfbSK9QNjud7yl1uLH9kefapXyonO/ln5khIOsfpuTuf5IG6wgWrzb/QD
+WFry439OFSTHwfnnXEGce3h4/5wViPMQB0DOKkymoG/H4nOaGlrcNNO7jvj/nSr4z41aZAlmHzdv
+hEMdNj9sCxfDFslQ2q6PshnTvStJUgbsRWfwGVIZWZmcZteizH/TvvkaeVP08ufLDtSfEXWxLIqN
+aYZdFVLnhfigVVXyNX/lm7nJFXK4NIIAX5/SkOuubATMCpJXTjA4LTfRPn+0Y5BFB2T+/Lm1MRss
+slGoykAtSmKGpxNS+JShPKWgix0uDtDqDvdMIxBhjRreggbqfhiW/NiLFmAISLwhX1/95NCsvFPU
+xA0sq1qV/RT5Jd1b8+uRxu7q91OW1wVeF3BGiYnetW8cyG423Tr/jN/y8syMbpkEuOpZRGpLSlkT
+aPynTYcZyO4PsgxS5AQir0RKCduAkl9iPxGpecUfKWUwv20nA/U8EBiJsnr1zbs9wAWW1edd8yLS
+PlBnaf3xxy7N0zctiVEDY3BlrocG5tQBNMC8PB0Sx5g2O+UVD/3NyL8m94dMzt0iY7ePj9tZYMnl
+/QK/rd67NYbUGtZqtVs5DKmLCco//or69lDXd0pAO7oXFOh/ivBMs3mCBYh0iciDOiPXm0cea0dQ
+K9DhjpfA90HKthqc1+Yef9tRhe9V54An7GwqCWICA6oDZcqeQEOCF9q/mMxag6aIEvzYDr8qPM60
+0RuVz+J82UUsj9A1fC3ZpvILRHGJBbx3jdltre1uuzr1QRd9Rjz2JfgC9nmFf+AqIvk7iplA5vnL
+VhrFo4ijWWd3vQBPykPN6jNPBWrCcKHx8oSsB6sEOnJTD7UmKoH41MxclopmiJhSFi/9wcIShY4H
+5HWcTwV2Y4Bl9fl/GJwZnDdgOaAMvHbSd07MXbQBg0/GBJPDrluSnkgmxXQKmQ48yrzO0YaY24Oo
+ax4TC8aaPGuUcqQb+kF9NDnTbMqJKW4MDx62+J3Oxc8nwEr4Xl7WG9W0FjU4vn7zjePrriXvBRZg
+kF30ShPGnrptpvYBlVocmOFN/sBcOToypdhCVi0AiuufJTQqvL3C4Ku8v1foLCwo3X9vWsa8cZuG
+WsFhW9eJ3tttykaNg93bk3Xm2pzON/IUXZ5VejEvgZ8TUOkiYw9gwKufvpdyCJs2UxVImPjzVX+S
+IzzoEBZyctWoX6jdFZFR9btlaPtratuYHa3rRM4AdBMLeF2hQysVgbOttiK6J1O8P+9y71VKBPf2
+TZZANIo6NMmxgP1hADehZDQpx8mWE2y4n3bU2z8t5i+9Y0yLoMZhWNY9Hy8DxcgVZkl011lK5fLL
+2AEgmy9z2wffF0ljlTPDMGvNPcRdBt997XOalZ8WBw+QT9AlYxfcmlaa47iiBp6nzqF09a57p6+t
+ljCwYkkib82HNGHPGRcvPHOb/rqWZvlyC/i1zF3kZK/3LeF6ajedh1/fmmeNRHgrBSVWWsroF8TR
+TMX8zvcfmgxatpAZmfM34xxrbWWK2WLHBxvHWDRhBJTz1NbaQMmoLC2nt+xMxPLQ+rqyN8Bw0af3
+1JMpG5imRvBAb01zBjaGxhmZqH2u/xQjfGxGJzo87O4r7/dXa+6lu54zanov3fp427lfSft4jJxG
+sRn1NZvt/UHC5ypju173j9Z5kgp97yYfK/R2xAK8KjS489Po+J9PcXeQMztQvXN7xftkquzG4gW+
+ZpZIbGcwh/nrtwMdHPV0UFHJLNYRP8/nRMGyMQ6eclOJLbEClouDh/FdorWHL0CU+GadXBZtB3mO
+14HvSYbVpDzrzJjNVMJOYQzLV37hhXorThu5SnCukNWVP8ySmBm0Kjy6J3IzqzcqZa7LTE9KEtW6
+d+spViolKCUfIG/pqdSEKnQjzT3nMmcRj9orfNzy5JXWt10rFqG4DRlyuChHS2yWpqABEtDUE6XJ
+OQnOL3jnQpH1mmJ6MuRQGsJRNNk58rhYPs/AYtzvoWvR8KkcAlhexahbl0NqEwjOqOMBCjSu/fk5
+OfRctzhpU9I5vQyd0FaoJU/E6LB64hzAMdGxejwE9u+DpMtLHFzeDAO8V79e4GIBmla9XaJojKpA
+tXl89pnZ0ydD2agsWUapuCwTlyLjDzMxKihIjhlD5WXh1tTozELoiyjBg4TL8Yj3sUzLrBFaCLTW
+sxF3tCZlmyYHQmKK0ntWrTOLk5ptp1EBIYu42Cbhptb0yw/pWE/ODec5BkvIL4/AQypv8fGvovkB
+m0j7wOgsvaGLgQSPHOjIefL5nPeb1QzRa5nbDRvjAyZIJTK3IfoKTtdQd+HIvTAk28iQNq2LE24m
+MsSzyrSKS0a+dI1ZQQzp6z2YegryYrps9u/wt19k97YUeHw6RJy4GWnH+ErH9p5IMOY+OAK3+Ovb
+kxNbD28btRdp0dRdL4Oz5rJUOXtPyYvxQ4QlvrJsFeYkCcn6nHutLtv3Q3F+BPkgrqJxg+rp0jwp
+h5MJnqykHgJOfZ0pOajKOuNb8HOmLcMy7RGXqMTQGeanL4tZvXzbRyR0ZXbuw9X5AnDloY/eCA4Z
+NRNt9xamSsZGm/OeDLgSJ9PJyjuzZ26QSmtNlmHuzc2LBrJKY9SOKzN3b6AXGM+mma1Ck4H3mumC
+IniwSctesoiR+XibE5lkmzY0+KAtnyAmr3a3yyt+V4M1Opl30DhthC6avCegiVHARdDl7WJPkBZz
+nxqBj+gpUQVX4PKr55skMfHSty+z9J2bvc1U/h9hZx0Uhdv1/SWXbpBeeinpFli6u1GkEUSUlpDu
+ku5aupVSBES6G5WUXBoRWRakNl6fd977vn/vzDNz/3+dmWu+c536zJlzVbcO5+Xv9c0hAWzRo9Z0
+HmLUJNNzJJRjD1xFxjuZuKdvcwPVczMfTt4O2VOiwBd22B4haKpNrsgEDp+9ZJSV79jB7JO3tAtZ
+9FLE9ceUwO3j3d2hsVm0oujpGbgLSQ4VfXARyD7sHzve7RCAxP/tWYh6GRpCd/9po+//1gzY/1Mz
+LDaaJBvbmci8awbe6U4v+LJoWLG3Whv89oI5cr5tn14vFe4qFCyOJuXMF5gO0g7ZmZmLI5hASTnv
+ssWndt3pMCe+8KicfFTEa2erT0Wov7XtMl2d63p+jaj6efGmgybnTYd7Cv0t1wCCPU22iyApu9jG
+QPqiXYj48/bZ4/6DpTH1th8zt6fWbh+6+zsAIGsFDpvZ9Tl2OdlPiG3THWCB83qpwq+C1Kg0zeT7
+2tGQMh8OBdkmZPJ1YrfUCeJTMaw4vu9hMyPKQUAEkZwpG+9E9vtL6NQvuw/HNy0fmXxYBYvXFJ7u
+hXya1357HqgMTBTs5bA4s2/qeEi/i4u1jZD/tbO14fTZ3gt4MeC6zRRCRc5KyUB4NueWSKfS4poY
+LtiXBqpbu/vx3oC5+/1X7CG5zp9cYoO3EfY/hFykudv38mwJ0kw29V5gTZEH3Ekj11OQ1ioDK6JZ
+wYrSxS2opdyBgxUItcXUn/OEMCHFNhavrU25h1+3M1UuHxceTABsXpbYtHYOvH5Z3OThzZwWu/pQ
++BFKRJR09oJNIpwutG3Ru+Nsn6arq+mlaxur2BvcMJItoNChyCtiRZ9wih93sWP7huXQL2ljV5Hr
+9xNCy79wZbknynex9Xy8d+K9vkXxeuZSyklNI9sxgP42m9bKz2fhn0zCa+8+Bk4POPCxeO6DlhG4
+eqV7P3YgYPszOQxAIDX4i5IVg4x1ytE1vwhsq+UWppzCFhXAtlEVsklQ6pJrDTv6gZhWXiCT9f3T
++x460um95qzqfx3CrpIrtT4ag2+EfSfm25cKLupSkjhZ0jDY4Qqwhbi9ynFyHkj3YcYAKJ4yaGV1
+HBZByuLJfx4EShSIYQCJFCcqx3VOt8jBfRGsjSl+1EbKhq6MMh17YHee+C/vLtIxn48fS0o+lnRS
+HVTTR1uAzmyARVoPJlFmc2tHrJBPayGB2rDV3/CrGwzgE970bbqij6hHZyhx3/JUZ4HDz9ndviVW
+2aG5M42s1yKoJuPMb76HaZ8oWXECg1sSsG7kzmZIdlGlRlakM9SdSgxkIO0BiCwQDNF+a+wDjyWN
+Jt/Udc9b4CLHm8vnIDSyACZ2KebNRWd6ZA24iuFXj6SPpmOvf9sM/ywPp3/hSA2m6FT8aW8lYVPo
+ujewftDv0vaORgvU98ce8MRZD84vEzW39bfsiQNJMY6OQ1xRWiq/CX7STwd4OADobagoonNSuDM+
+ga4VF+sEPz5CTZUKZkQfIrq50J9T2xPEjsbJ2GCngt+C0yXeOq1P7zGpnMKUvYt22WZ0LIfkZzkO
+dU9g4nB75Ud45RmEuYevi3WYV/hw3WXvDphC+75y3wnqwJ9XfuYsPq0hO3XlTnKYaRkt+9bXDk2D
+cfuG9c3AmVq6HeoMmFJsUGcmzMlMAaw8ZgEC79Cc0SJkLOkfNrIh7z3fvw9d3+PNskauv5N9M71r
+t0Y4vA9zg+XpyYCORUJN3lwY9jqx1Dt9ZrZOKJ6mpsJ2YuCx5soLqCLODNIMVfdDRTHdUqneGO2w
+hovRqzY/MDJccp0CWOB0kYtS+f/CqTVmZhn3LJZmbZ/2eJDveHf5qJx9fziey+uPlYxjGk+6N7iG
+g1uuO3I+LByIwJJVRHj+uILjR1HnPrvmQJuwU37LlFJUfb9HmhRTvLSmjUjYJpmt7At6+MpHudfV
+J3f+URdYLT6O6InUuc0BwlVezHLiYSlU9+1dnokyFjbwH7SBHfDfuAHOIAnpf6gGKd+/kcN/M/wH
+1SD9B9Ug/QfVIJOqengxR4H17H+f1SCtY0PJGKQmNX3/CO92pz71XLVXNJlNf9Od2PUSjAYNpE3M
+/Ytq4NRJdpPkTd/LOqoR4ghtDLG//jXx7o8WZ7T888bEbzDlpeDDxizroTyR6wwOQkJgw1Tgj84Q
+2ZbulxFK9BuUsHVZakCJ+kxsO4JOWxMf8tXGmYJ5lrNQhDLubOgBjLp6QDoZP8pvtaDkuVOJyKEg
+k7BrxOy2hkjR+6lSV+FCuyz5HjfmFrFnQSRE+ufEujksVi6FK3L91gscNzzEO94WlJedbbobyO5B
+sjMEPh3t0h0WMElIC1812f8lOAz0PkSMHPJVG7rshSPq5yErO5UieasqANz/ZOLNfi44VpQ+pbG9
+8mbSp8jAUpgOx/YXgsDT9MW290PrPizPlHv2VYem7vpD7pGNUhEu2eSUg5Gq29U+PYS7szBE6o2y
+/QHHviCEOgTSlHlZEj25wOcdiIvdJwQ604eBY7jCclZGysl8qOU+R9I/ntZogthJE6aVJM78qWFT
+nvkK03OMf+Tz0qlHq2QrcpxbFJyv33b/HSdKiG5ZVzdaK+HEdgJ9sVjT+64bKVc5aP+a52HCRayv
+dR8dvc7ueyXkj+u86xB1xNrfQOdKKREK7qBRsVd4jFSWeb9oY8Psh+YhIngQf1dBnjl1dGcnEke6
+xT77jFqk2QIBiy04o2/6kad3Kz+OL+xDBoZEXtryBEcvrviwQA2ZvZbuMiQ+9CQQ7vJeFD6XA3Jv
+Em1ZwqFhsugOlay8l1khoOnbZ+djPXlfW48/HeIbkqgW2Dh1DsGb98OPBkMSuhH2x4QvRyJ87mxE
+HhK6/8CK0FjTumGGiazeELcdQoI6Pr22Pos52HsWyGIIlXui5UDRH3LjoB966g/mZNQ9fej1za5v
++eLZ4bNN1KuoOaEdWNdQUCIsUMrTdCLjp8NE68uLIcXxSFhILTbhgL9IvCoW1uyw7+HLDutcUR+a
+s4WBtEvPknY2JDMLoqR1IuZkAVFiMMQuNUz4eo4zUbG6ZrRb87bGBtokTSw5/rvqdBMcJ5tKSTcA
+01ksegTHW3nZKStWf9UlP05nG5Ze+lp1N+Rlx4/DcDGeH4ITz8UbTljOf37uYmVe9d+2o2uqX48u
+ir0cbhC/7v3ZlXg15OP5dkmnw4aJ4I5O7fLJ66fyD6gm2nQmPp0rKga0L4X+3IbqXYfYQWJ6hQ4z
+J1YOPnmyd4+VVP74fGrn+LOR+Jed+7+/dKDAAErwos77LhJEfdTsE8y3LO11l+ZgtdVHixjAVGuJ
+bHxaL6/9IM7hVViCh1iAgtn6E1Ilw+TFTJ4A8Eic2mi0Q62psoxTfjjH4QBuxO2vAGkjaoLF/dBP
+HOGz/K8C0MflkEMpTl7xqUxD/89FX+P3KvjSRVRZOxtt48yqHA2j8vkHOF2pk/62KsMHjiGGsSts
+fnlKMsHrC2cDVtM3MnlZ3wITW/CxsGDlWTmEn3K2w0HXasgHco1oX/66R+W0l/6O6ZojrB7i2RoT
+DH+gn/Pn9l1uOc/ii9/JkD+rjreAf3LG8tNSBSXtEwNI5hiYFJ8rn6L4Io9Aa2VlJhMH4Tj+e/1d
+YKc7O0QIa3T6gkKKxJEW5ZDqFVdp4Z85Vl4dR1DNnAKokXpg5MKcuPDdsXObnv6kfWiHZRBxu+UP
+aSHBsT2IPq6yMLxJfRuaUu/k7rGydaOugjtIox/Yar6nbcOYY8hy44pa/WM9h39BMp1Tfxl9afwj
+KyztT5ckAWvjzwVa2Lv8phBObDNv1MoC18eVQ7ir5Lqsnd2PnFUF411kIHJ+C0qe8eIxlH/IgniB
+PLdRx+7upci78N3qo9aN+AP0q+Jmh1R+q2rijahYgtSjStdd9CGKjmNeNvFw9ENpq9wgRPPPQiYQ
+R2/uqEFzCXJ4aYQBJA88uyzUAu0QgQ68mr/OoDXOjO3e0CTqLdsxaQl/DX9tTj0Rr1izg0Qhm2/s
++AFpN0J0rIw3K1b2UUVMJsi3L7cD7hBOIDaFV/qnygizFJekC5Ch+B9TTnpKeOAlMngo+K75GK2D
+Z5Ipa270sqhBRaeM78Eg1xaV7qU6hzaLQlDWTeW+TVmTTPHvbmM9CBlXYFtrRwgNBvA1Tgw4qzI4
+KTaX6bFhpb1LB+EUENju103gHAAummpGkUSvr/M29Hejxpfbmhsu6RxSMYARv+c768k/UU9zos1K
+QUcYgOqNGZE82BL+WpHIadRug7lrJnTDDj8sGTmYeG3AIRjJnGi/cowyxWNO226AwLwftE5JaUV3
+w2maXqfSF8t6eC86vBHdZo+EvX2lYXKHcBnlAQ59uxwpjFFVBEK63pv0LKJlKRo8i27CF+OL2FjV
+fTZ/YgBYgXyLJPkfyNmGHeWnBu6brQwqND8uNcaFz07amb+KdHn8oeobmtWxDyFwhQHICMYs2VMT
+pR98+r4yJ2nBcaWbocPsAtntfRa3+X4OWYiEdQ5sqWEAL29ldzEAyMZ0sEz4NNJ2tfU3BtA3koxs
+EoImKXu4DqHcTyKOCC7fgaSF/S/biuNPviTeVlD2cvVIwNXum6vJYcnlxygTCYV8Mwf6vZ26qiy2
+wVvWIno8JQrFeKStIZ1X2xAGcP4mABGwTJP2OFxuImSBiD+94rapp/g6EO4pE5+xdd5qxDJ30223
+6/ZMLxLFy3Ng11F0V+i1iyTHAF7NZYeWodQD4NpkEWheDvbbDUi+WbpYp2HcK58nUfnea7rzNP1J
+GsNZrLr5nHf8HzzdbAXPHnEkFllBh42ndjAAbFqtJvpezYumkIpEUFxncpmo+CM6X1Qkfa4ZBmAJ
+ovZLhchDzu7ABxMx0EJ59tta8Yowujg1PdroSUnqT7toJRlzo1Etat9Z/egIDkLtQx1D4eSRQGlu
+IxkG8tKjeWwyygdvKZkUC0AHxSk7XxQ9dPtr3bX7rvgT1qpCZ0x63fQJhDKfyY9pdhCx79Mhm8nr
+b8sibfDC1N9Rj9Ip5SXTEf7ZNO7S8cUAFCFHIwefXqgkedWSSaKMv1iUXmnT+4fScowV5Yrdmm9E
+q7/6trHaKXH7+RN4doH6eurKiU1mJe0namEVHIpAofmz5OJckRhAABjJ9llP85J40cYlEAPQubd6
+uMIC6gqq98MAJrMpVbLnLcx+HvXAPRFP2LThcLhSCbMCVCK+2IsKIXUB3XbamnnFd+gXL2yoAA0o
+UYA2hLuzv/mRUuNRpnN0lpNvNK4qf+/oFja3V25aWTPaWzfxCoUuqlXpc3lNpTmuhT4poQSXoSeT
+xlF15vTsKnEVNvJGbfAQgqP4wFeRXyt0qQQn/XWHuDLEnCPcNCE051RWt+8TYVHxMgu163kXrUo8
+aqvcLWwNHD6/oi5sA+3DjlsJ7xSVreBO+yI+P9PoWONYH50LfUNuvbSyt/p8WxiEq0me1vAieYFo
+h0zfbkD1Cbu7rZr1ZvjuInWb4Tks8RvyixHqXJC1XQAu8i55aqywCPoYfUJ+qzkjE9hvazXx4q38
+ktUj260A3qyRsoswQZcU/CgejQ4yyWL2QPud7yHm6X/63vIHOxFLvlCdkXML65MkC3uYMvtdI58B
+23StqZwBxqfv0QYC2t5UsbRp3mwRKp442cTh7ft8eYck9He2Kmio8De31g0xla1m9A2SxQDaJapj
+rH8sEnH3haZTI2mCP7/Plmu1kmtGnhTlKgsAP3/7rQpJyjDTXoDIWz+f+bkhivZo1foSkHcEv8cA
+zAflFSHeA9eWQcvQ8N83tfSXg+HQkfH/+fMkL23vFefWeYcLBsBr7eS6MX5EVbmoB7uAZW34O6YV
+CdOuBf/tYl+kXbyeEhVvn8n2hBZGPo3ZPQ02lB4TUQGTt+4hjlv4ItQo9oKDer7YvoS7KKzcmIDI
+NW9hechNT/bdWxqp4Qq52tf9YwI6rD5TRRtf/eZ+3NfwPiQ5qmZWDQtyCFSqIcCBNlB4YnGSswmA
+/VRsCJT34Nlkae+W/HxCV4wH+Ng4iNtnigpP0N7Ak8jlbeJLfYZ38lSK2eYJh1iocmMSAvKvcaMF
+kLQ7AXrZA02EPo54+KKlcEY6JNZLlMo64tpKSGHhLciSALxYFKHosT9KHlYvOmD/V1Qz7t8I+7Nn
+fTMRA+yKt7uLeqt1dsh2BLA4mis41bYu34D+Kfs97xk7Txm+ygCv7kHVZGQiI0WgbEs0HqU47LEn
+ArvQIMjz8CcquXKFHGJjRU+iZG98d+NBH68DSeLOywrnEpL+3heVboSgZAUcTpvzh+Lm5UzetsWE
+sIDMqw7OC17vr13MNqlKNJvVogys26yvn8ga3n4WcG6mVkExFtMUaLKETQmsUsVmn8pLFpNxphGb
+2SRFjhD4Owrh5bytFKPnwqa9P3kXJXGN0/HoXU3SU6aUz6sPzFet1yBSz3LV2seQzsJU5PC/D2C+
+OG35Du15+NfPH/Wh9DEA1qN6wO6dlOnfgO/xe+tSsInPvsuWqFw8CuDKmPGompCagCBv2t+k73zx
+basUR41xquyX/qUinYuLFguC7HdMFCMm52krYbAjAtmkeHJDxLPsUVMYLCmD1COpK1y1Ek6nF/T4
+7QQGsPymItEQVJEoYEMz7pHZl5PISMumsY1Pkn6Q/lAG1ZWdHWavJ6NButrBrifKHBLeTxHhR/mg
+j47TlMj5XCXcL20wYMfjmSXFPBuwjChYtniBTUk5BGsvKvgHweAtrC88WWKyEkC5ZqVDzfyeC0JD
+3S72hBgvFWiLWlx+fq0ezpGNQM6P8pNcVE9P5rfwAwuLu368UXkk8EM8v5b/yyeOYXd1ssLIxAuh
+m3Z1bIsoMtvU3pB5bA4cwDZZ9Mg9tHB2npBFuKKaUbksaUQVlS0W61qYxRAEN3FJU2f53gOcSbSj
+2UV3YgCxjLXJUiFLkL193ofiuRrulbwwssTsyPs25n3CO9na2V6PLRFH0Tey/Tm1HKCeug2aKCGN
+Fp96iMS8a5JqEkkUS9O0kWBmv1Em5IupCq9L37lggOaMeFcfUdd6OcIbHWpKFhjGiQqXGL7EAKCH
+0lWn62NjN+VGhOb9sZuz9LYNDkwoOQr6394x9214NENIxtllm6Fuz4aLoBqF747D3k4yNQ0WuRnP
+Luwa3sKGrepdZhuoIElx2MwDlSATlPV6hGr/MPKtcbx5lWhCmBX6SIZE0PZnfCUG0DEghSXE0kxr
+MKKchRNxcDMxb7gbrsN/se98bxJOu52+tWb+QvFV5pa4RnwSDQTv9eudxOdsbf2+8RaC+fkiHlD0
+R/rQVxmL1EOFGoGsPfoeke0VKZHzcqoNezcuGpE0zeEZ7x09rR7kmlvBNQvcJSBYIbYhlul3mfVB
+dpIwzSkrLmX681VKmjdhdPi0htWgbmClqAtaB1urtjCIxBdNqo1/M3Zf2ZIXdFLLwAq9empYDvnc
+Vsb8+/6rdsEX6EGWGmsG+nfjz/j77mQE/wfBVP7wwb4kMbPw+KTeMkU19HsxD23Z99Zkt10gPwWr
+ikQMIAJucpMXmwIrXk7eQkItHGdXfyieXj/Lazw0aNqhI6D1++esBikWO+C/cQOcQVKy/1ANsn/P
+apD+N8N/UA2yf1ANsn9QDfL+iJQuJiysl/871SDzn2jg8xJ6ZMHxpYWp+9VQGndqv9k5tcTkdlZr
+fyeof7FG//+dlIQvmjzIm4kr1D82xSNuv9Ij7MLBc5ww3Ks7D+6b9A3XMyOnjRxAhs9OPHYGMxLQ
+ZjRfefWokfC/vwpN4297x7qFNYWWZ4UcdK9/w6z+UyGyvwr9t0vgDJKR/0ch8n8rRPbfDP+hEPk/
+FCL/h0IUEZZeOHMUWH7/++YR8jq2wG/bzBfBMlP2ABpj3JcbMjcdPbdpRpwYpadNJr5xpZP/4j7I
+wbkf2aQzFBGDBl4NYGYan62vT8uGowdc8QaBEZP137CweN042RE2iSRqqS7kLIsHPnpAmHRcaeoM
+MQ55Fzf8mZx3x5jh53FrvRiZ2dsmfVk16EFcL5VSxu7Jh4o51p77d1mkTyi12FMf1TkvFyk2chvd
+pvIZcDIPGfNRE00zDzzZOT4aT1uUTx8fZrXaL/1d2r+9HJQtU0uj2mQQgloc8yoOO/51ykxdcjHP
+6vFoOBlvINSsn6Tr0TwBR2ChmD8VC+XsrCKvYSWgp5X6sYowKbxLNX+9RCdtEcdlncp8pdRYGFYq
+h0fuqNhQrLVEESHK+HbYkIh8y6yJOOeA4H72phZxxS/5DWpK8Wweq9GZ/ysJNYiwrW5HhKXPVTO/
+nE6Z/7TX9rSFoXYOMjK9nQwUjaf+PWIeuKBoowt3TSCrjD9/N2kYhz6u1jvMDtOhQVaim6lJKFfU
+KdygYQJqt2ZQnX28aUreX5wPMo2zCsbozCpt2EvNb6of4ve+qfwqJlqJHu3UixX4II91fzKhTS/m
+V6pN4k3W8LVA+KiBtf1CykXJTW90ZVJFFHnbpDXeppVTIA+CjDyJb1BSFlUVeS2BbQ/JcBYKp4ge
+hdry6yjcHcGvnFc4I8rxiewdA9uJYypFv61aBfInZgnBaxFm2JzWWMLbYplJGVhmYK85ax0DlfN9
+WGY+Inh9lAzW9n1cJa2Zzkj5jQpvKJ43jvL9Wo9Z8rCWw3dT0YiXRdD0mHVL4cbpO0Xv8V61VfEG
+2/EGfOZSMGrBKzsmQ/yH3omZnQwjNr97a/qc6myFraXAD3vBkBnzzPToLCBFwZ83DYzHsonWqyTw
+5e53spSc5B1RlAbvods31UXA6FZ+eWYQeeFHujcOfV/70U3ykaP0ZLrVmVgNJr1sNSRx2ggjNiPx
+L+PT56rbJ3OmfzKDnOatB/AA/uCKD4wFD4BVjU+/2A2DZg4GleX3GEtzqg+fczUPm0Zyp2PTZp9H
+pabw00Zp9cRq44JoZlWex7FglWIAElqqBe/UMp+0kVOmYADd4d1cm1nHB5HxnN/ubXxWSTypRVa7
+YenqtgeZt5/xP9m44xuThBjBLh6mk4i9MD64re4dBPYbFvGTsnY8L2uxeKYTtF08wKXJIwWycP7q
+PLJqafzdmi6ijz2PAXx+nh4vT0tUpEL1eoAQaGkmx6pYz/JoxbhxG9JtQuD2eTL0/grh8OTgMJOc
+v7Z5JIarhzVb/vNhYct8uaGFxGCN2OsuxmKq3lLI67DG3GbeOYlx3TSqnmtlvkSv50aB4Blbx1jO
+r6rHjd6L6kHOd+q2nIknVhgAN+Nj9yjVqlUuj4nN7yaiojtjOxWZahzghnbeqKwapLzNnVIxb4Tk
+T/ufi7iaDe7ez4Hx0RIJFgxxCvpBTKe8UMkdO2x6l/SdNh7Rni1xY7IHudZQdeGmJnmkoRQuAavg
+WNRdF0q/hHuhpLZ3zmG/HpdDD37TGPFeXfPKBgPoyZaUOS0snx3O7vUGria0DwEjCvyeDb3xtkKR
+DtKoxzQmBehmz2kX9JIYNlaR+1oC5hSfc5XJejYEkLW/SdydWJbME2sgnPJFSAR5MZLo6ddYE3fE
+ETSlT4Ik2fV07BXA0u5sfuRh9Cf1UUoF04/tJd5atLFNZGyastXkPd/prBMMzDu3YdjKtXyYZzny
+VhuQ14Ed3FgyU+FcHtHp3Gl91ygTwYAL3oMky8XE7guyiivqEh5SBKmi63hdH7wFgmOGfWYocgnk
+yOksnmuwXSFOPnzf1SYsllQf2gANercQot6R8A7Z01sVUn+qKFLC5nICzxUOgIdS5G+IJa0lxRCE
+H36VbUfUvdFUaPqwr0VfOePzSCxnQXVM9JnKy37Fvs9/6Cd+yrSfOKdXpZS8PUyqSqKqrZbZPJs2
+WIkBeClTBNaKyNFMgvKTlumPpAtUwqRKH2IHZfIEOYmPSXAqir/w4CT+zM4lFm5ewJ9C/1VfI91Q
+XJ/VCracH99Tk4GryC1iaSduZp63oVqGtGYUnlL/MbiVkfQQ91NmGf9jwgL4hcRXmdGnPpE0nxKb
+fEzzvDgQLTy8SbofsIsImdUYmAPOzaIfbB23DLGo3AcVnn6gPrhytuJWZ668r7c50XVIvOkerJCL
+vTISx9t7Zn3WwoXLEWKuS4jjOsO4F7rgqXbfYRCBFqxS4APjchEymz7D5i2/UN2SUhpYNxGcUT7g
+G0zNiJw68B2ZyGv91eCQqL6UTWJW/FxnbIOgn5W2uZZiw3UvyVZkn57zkIgjBI40dlauSp84+lZw
+ElXyaphZqcY/UnyEfYDJyK2yWr2Wu0olQCxhDtxG66aXz0OJXH0g2rtYVZrIye562td41nDrsRZ9
+9VoPT042rbM9WVFGT4tCWFGuLBaXT8kZweNFuqTIG0eGAXjrpUQnhNy31AnJPkigVxspVRu4Mkiy
+MVXDevvWK+760ngiYpiXPfcp/3jMHx7OuSTiDXO8I7hYqI1fWuGsvoyo6BtxRXclO06TNBPqogjA
+uRGVVtMbovHEjAvx3wZ8mUorhiaI2zasyAyC3OAJ7KFz06rxQ3wi1kXcGq13RXFEcUqu6Wt7TTCJ
+Simnk9NemR8H6jG1UaGtuSYyNDY0enIsEFpHVDoVKKArgoq7FZ/B6RLkL1+jJIp4kmiXwE8PpJrr
+Np7hk56q0LqAeltGQcUcPWt/3K8MRsm6fZYHfoLr2Jl7TijvJEkkRTadXyBz9WhZxNIIP4hLErAj
+RHdnUrO61tMhkt750Rzdde10DBPRn/j3HP964t5kBw1NlIJR+6DvKfDGLeGe8jmLDzQ4vCEyM48s
+Ie3sI90DHnHU4pMDbE2UhbMBpSLt5gtLZrre6RYwPZQV/VhEbMB6ENuRKtU9+s9ZN4kOo+rhwWxU
+YOeAoxarOorOzZQEGh2IxTO4bBp5UdrRpqOmQx9U0Ih/RXL6J0xQtGGYETLb/Cprpx2fiWq79Ft3
+8v7aMBYhH5ZQbWFHwGS4HoRO6Y6tdx3Y6Z7x2IHFLvJt4o4gJMH4Iml4R3ENdsfiK7+pYjZ+sPeN
+pznkw5Egc65VIzt0Mr+ght3GL4Q4WhPf3nK70Hk9uewp1+7sJ2MDxTGqr83Xh7NXy7j6JjFtpKHv
+PDplpc+D3Bmcp/pceqLwXQ52shUe+lVzWhJuEG8x1/fj7B1MfOV7/lbyvppBLVF+Q1lHLcHwbFJO
+Tb7Osi6/KabfbKD1Qz9x4g6fWx2kkeDFj6/sjxSfXUUhv8dAqrkAva9qHSY5RAgtwfLGisYjBxN4
+DHhvuXhwsRToXhokao+wap5Nlu9JNHs4vFZlow8wfCCmoxeswxaE/fF2Y0ELJ8r5WorkmXAsgPlR
+nGS1HRdJgg9VwqprYOEka//OeZSqai0fPSiizChehrh53CpaRnyihrlauychVUsR/0vp8WRGEYAf
+aVU6wTTcN/hBWxxYTnFLfXOtSt+JxdV5+ZCK/siBPbwWNk1rorVzi3BkeyMDHSHE2WczFN1QSFI3
+pcxySBuRbU58L6RJPYpcIGdQwgDO6F5hd7DKBGsrPhliyklbWohpLFNOE3qR4zBjIETdSBWVbqW9
+3HQyQzled6vMsIN4ymj63vnaMHWCcSCDstCM5zRJTX64EyIlRrwKvt2IFgHmgU8KgYza3/vLz8Qy
+DpmjuAbFZNcvTMQCh4H2GVNOvt2z+izcBdCQT4/KZmkisQH9J52RgtRjGICgeJlhqtkALut0NStz
+xeXVDJfvD+r8Qb9M7O24ISyF/bFDGX/KvLdV8blV84mGI1hR4+L51yY6fZCT3Xj+wYC9hFDpwWIt
+zo/owAZBeI1eWE60yraq/f3wbd2SjvnfgMGIYpiSxSldTMcfwHKcSEGSX+uRoyhdoneQ/BL0lOVv
+CO0K3IsSmv4IAb149aUZ3KvosFayM05zbt80EJGnah7OcAW5Am7lyVO76wTXBCNXfpnIyzZ8pKHA
+q+MvZYXSscbmSPA+eh20ZvKyRAwKvK/ipimJjbkm3wDg68gaSpQZ6YSwaP7ZODkmLVZ6dFMnJ/6J
+sOwSUlg5MJd9HpLzh0RB019UHiShVoP/+CWpkVU2Jy55hUwjgVaDKV9A1DUWkJxjmIZ6RTIsqD5r
+2ewPXCdiPOy9K1eX8ljsyvEUUzqloGCN0tiVlksJWKsirCkiqIF7PvtKSWF43gGe4tpLBCFxBWud
+t4PaIlXURgIp+l5JVWA9w9qacLmJ+kRLMQYLof6RAyAiT5gIC08myWAZnhsJdE3IQwRFvjLO6qCq
+N3KVgyxSYlEN1Qj03VizRj/uUqC/4X5J/jxOoidNw5SKzbdf4SWd+3Ne6NfPsOTKhZvKD9vM8p80
+LGLF/OE66hqBTdjD1Dapl2nczN+YPTw4KcdVJLu8tikWkQ9i/5DKo+0aqkY7NTEA3aUya2dIkoEK
+QQqU9/v+DI/hipDoNy5BLJKwt8NiQ9REDU0r550i0QAEn6ms4/1uO82YYXZIgLNcnC6rA0L+p5EQ
+pwCqKdSo+DvVaR54S3jh+6kPjizYyIXJ+L07i1W7sVgF7mc0HbBymxHyMuiAeVQT4m3ZocLa4JpS
+Dc5l74lcmissdwwqYhdMRFl9dcbKmLr8rvLF1URBWhUOK4iGfuAHDS9TONeTF7zMFt95orYqiwHE
+DeiIAdJFsEiCHnQXOwGzNLwCbB/HhDZZmSTMDWw2aqQPZ6nVZQ8d1AU5CZkx5rtu+OD2gyJINwXa
+hCeUxZDRYTphTUWMavZKk4eq2Y0DG/yQbrWWfGRAlHe4RIwyJ8I0EU77wViozfJK60S+NHwCZZ7B
+DX46X1mxGLeTiJ3o+HhkA46rgAUEEM091JaJ2KAUva9je+ur9N1QcoKS8eO4o60Hi3lJgM4Ql9+U
+FtHCpi7Mo1hP7aW0SQLxJNsJGZs6U7VBDMEgp6QRDFzl6LO/wVfWWWUEl4ObUA9l1Q+d1d7AQIvy
+zcYK+gP4aXJioSveOTGBarq/ozu6wSFfFtZvG9yblRmdCcvLwOcQ/4cZ9uylQXRVLgNy1N/fmMLN
+vgPfax3el8HT24DzMsGgNwYagNYSxYNwWqnklmfOXhVihrQvf8eZ8v9hYUHopfM4zGiVfE34SIBT
+vlSmzKsVVHeV78ZJMCmdXWepaa2iwn5911ZGpiRYNGX6cVpGbxi7OEMh5V2XG23JSLE6CLj5S2E9
+xU1Bh/q+WR4DaNu2CE0Qu53n9JnQjqLmawtdrch5nR7+a5K75DOO4nEwq6phMl8MufK8n7VYbubz
+tTCBadeDtvyYPRVWezJYgphurwD+dmsENb3kH6oqlitpDsgw56fIXyozrBsOO2qhwIO5D2SBLO87
+xKvFS9VL758q5lYwOKlTBxpmpZlz2uNuhyAl7lvHXiz4LSTKBPJzZbkFsvKNs/ejFXmpnh9ThbnD
+NDIKfUKMin/JKN/05EuGcedd7Evae6EfaJ9NV5r+fN1WljrL0NSi4T9v1/2tLrqgms7gn5tHyLHY
+Af+NG+AMklP8h2pQ/HvzCPl/M/wH1aD4B9Wg+AfVoHR4AX4GIcV+8/9TDdx/Ha1jKw/L7h0HTL4W
+s//ODard2TL4JC4+mvEcl11Bixq3fGP1X1TDvpxdBBwfVJtzURhz9NWIfYiGCt8o7+Ba/9y6d5ly
+zj6kvps7izvGYpovoYBzyuF1jNbx0TUGANhFQ/pufDEAqbcn4dBbA4LDGwxAnrZbTALULcPFDLKt
+h3LRDZe0kff2RQ0IZ0ayE3y1/0yhF8IagObFclO0uY+pCLIdRJqdf09m+T7FAIGPXzbLnKPzPg/W
+1r2XPkm3JfXelDXM09zgjCQSxdW6sD0C7/iCfcLiijGAKSo3wNMguHnkKJhmVgctWCOMFc4HaciX
+U9PcwQDw5jwiyX9G9UVHQhKs9gh/gC9++eCHzmNrK09wjeZhB8VWRhSIlpgRYu29gOEsPoK68+XI
+H5gfgGNRy1V1F2juA6cdtOOexLDqleTMh7J3KDNfNtwMxaWIeTnOfLgFK7PTzRrP83a3+3dleQJD
+Z/uchmtvFT1yqdNYf/J7tiNXDqXEvEBTXk4i4JNhP+Bp8xnxpXTfMPQ3BiBUkGQ3aCdgbzhlNKqF
+0lrQEoUWCuouhyZqe70mPG2IE0n86Z5CZu8GHl47eRSebZfYN44B+FdCHjLdCr4k1CH4PPIVKbz2
+ifSgnlu/fsKWH+229XkZKVtdgAHEWury7iQVVDzrLa7ghVKtvHbuER4xZChpCWrOjWbGAAKyqZ8V
+3FRcdJuUYPdyjVPsQx9k3zI5JLyCLTjBIjCAeHnb31tI2ZZaBvnwObRx5Dbo8ObpTuaRNAagkgcj
+h2Wq42yks8tuLpG4lTGezj1dzmAn6QkcvFBhDTFnqLsrfr9JqCB9Wwim8YusuwEdjmBz2e0fzM1V
++tHBjrfGrfyMt1XPPxEq8oTBeNdl2LJwZp5hEfCE0Uq5XBnJhgmGmn5MxI0EF5ZRikSkAXBzXC8W
+JijodfsJ9laSJGUQt1pJvHkmvpkcKbkIlBJKSjKjzTLCCnzSr8gK6Y/qfEDFIYU1QA13eXJbZyOe
+jSMf1HpuddfYD/JjPXNDnZJHvNDay5EdGFelHVh7HRQmtKRKiJwxoUwJe0r5BGjar/qrjIv0+YGA
+24xhT5P+VyYJbHZGLFN/rWDgyRfPC/fGGC1a9TU/ly+Jt4KQIVQekZMdZEZTV+0s3oTaLbQSQn6t
+OQ68coZvQ43tqQ7vsnGF0lYtFBdsAlfDpVxS55FPczWPLI5ZiCr/FIXqgQ8xAPc4hEBtFC1l+qBi
+tk4QGs+l1VyBv+233QwtW8HTPP8pohkB4HgOv0HGXKx1ayYGIKnpc97+5JGre/XB9MKTRxapGMA3
+BO8F+4gPH72STJgbzGnCWtwJzEvVsqsAYQ2sT91YsTE222+c+ZsdV8OH1irRXRiALJ/ts74Ks3R2
+kR4957v89k3eOjYOQIYMrmBxXuw3pLvROctyzI72jyp1E5HgXve7XqNFs7QZmcXbmnw/BfCiCwag
+c2EUBjyInrjtKQ+ETurD53QMp60o0eLfLG8SQKvBA+eQ2rtmPgQuWvctV9n8o5WC3ZHIWFwKc+2X
+sey48u/Us/es8AD7QsGRJ4Hh/uy4IYuxPoJRb1nOi24EHcTRhMnsjDDNIwzgNS2XI6WU7DPkTvWN
+dQap4Xy8RCulGvQDNLPrTYKFzmeik8+jKpDX5j2bhiBkN9CF9TelXVp5/TH5UQ0Ltgut5w92SmsZ
+L135FRbGzZ55ZZGaBQWrKBm7mZRNd6jQTYB2vUzpk8BrFf1znfyPRNS+ZscI8zK/p1JchQbqrSyE
+rKFG5awkqiSa4KIuoBoJrQszq32L3WtXa7Nx30D1F65joYp1HSk9aQ3vaXSiizW7q9hjIFkSkaQx
+fsiF544Ek+FQCy8qkhtj0Zw8YlbWJqtz0ExsA50zWZOToK4zrbtDi3cFmmh5yFmL/ibizes/Sbbc
+/qcNu4do5ib1OVr31fZWl4YZWzdhgdnGpNre58wWXA8WI3GO6hC2y78sGmcsC0dz75c3pzGAub4b
+76CrsOCbmjYG2AgdbzC/DwaA5ZAOLbojYtInFUoyNDNNnsvJo41PUnmJM5KXcW0zIrvx2DsUeJI4
+UdaNrg+mnWSOvEudknAvaP+dFhdULqNhMzazrCQLoeEfWGte3Luexrk2OvNxvZmcttIDWt+ivtd7
+i12WnvJij2YNoH9mNf0KNy82oAhVB8FubF0o0R9q56LPaneeDa+yBq4MCZmZKKTn8PjO5GjZV/mp
+6wQLEwSTS+hScueHWFBMboJqPCEK/CoRnCx9x0rtXwdUtOmPx7gnbjoptKfkcmhiOLSPKkk205L8
+AR6oDt5ZDGCGnuyhnsPHyJDxhzI18C8DsM+vZp4/YQjfMNX7fU/7E43dEyLuSP3jdlUmlSIirfo3
+zgHaWbORtq5x00znvP2HEDYD6gHpSR55ktFW3hkGEIMBFHdFJVKqLTjBXWfdvsvYxckvGXkswY7b
+RfYgjaPQdFzCAxdBAboxpxCkuaKaz6KWUoLes2SPYVmk5cQCdybLFQYgTFtUnAMTcN04E5R9Xxey
+sioPPqlTHtn2YJIuGnxh6/4FAxhsPPibj4gQoTkYQJZhoSmbJ3zRyhD3rMJHfP0b7Fc3+m3Yc06b
+klV6yAsChBl+bl4k13Mt3+keapabcnxGK7aB0t/XpWi8HDQX/CW0Ez1lJhWnizCy7kJ8uniQW54U
+q0D+N4XFa477z9DT7oFWnfN7iXj3r11WMIBohJHwa2Hcb+S7KCkKLNYvZmfD2drnyOWd2U8y4xAd
+OkcZgxnHKmXRrC/wXC2xtHteynMcl4a/htud+dG+57Z6Xw0nrAp+jyh3ERow4ffM9ror1sItNonv
+y0b3Y4rjbhgH9RMpRI5gFapUSu0kIezoMwSyD+9NfZjryeTdKr9Xi/cluqFYHmLu8gvZeIkBfK17
+kna3FOLriMepTpexFZNx1jFLIcGpwnV4khVGGBUQkdeJPW0sXRP+Ltzyglcz60rU6g66uDdnuO89
+r+UycPITCbyrMnbgVWWJSoDfYgAnsUJ/1NG8B7IIuC+5vn8oJKlMETph4koI0Q5EIXwz6c0c6YMn
+ZRzZB7cNyRCaEbGT+gpWffsuueb5EmszPPRqwboKVy7V9I/yuMcd3qjOZKXBcMftZCGFDhiAI29p
+beGCSwgGMFLfXrzaaGY2a+zyzMQhMUKg62b5oozmzvDC1jqEfJu2AIEAIVxODc1tMIDFdIbCRoNX
+GMAC7yEy0LqRnNbdfWR+nuz8y8C3janHo9GDHjzy1tQo0H15FPt43Slh3oiB8dmG9p2ZK3GEkQa4
+xnCENCGs0FEHyH4MG5I+/32Ds4QBWPLblGEAbzcxgCJ4yzq2+MPjOi3tHcG5a/uHepGJWc27jbg6
+bVqcDRrCopdpSRp8spkMXXdrEjiT/DoKtjpXwOiKFhuC31XM1rHqyWDuqxWmjRjRIZV+3YGzTwIU
+E0vGJ6fnARbY375jkc8EXVDB0ijijtBnSqWdvN/1igxepMWjFqY+Gz56clnrBe6nDtEVvT6p973Y
+xtceKO55h20uRczYFPrxRP7ocEP4+0rAw4X0AHunZJJhJUemwwSZNu0la8MN1WlmOdrPvFlhibTL
+OcDwF+oFe+z0ojh9sK68DFPwjbpNUdpF4+RNi8+aFd7DtBqv28Bm82I8LRLPh7wH7whqyhx8DtEc
+4ZYW2AhHSIDDHrrhsBku0bSkVOi13kalTV7qAXgmwKE5kw4uT3IIGWpvdc6QSHuEWofknewuuUNc
+VlMy7oUrHvzBALoDEj/S8xh+LsQALKgdcH5kDcx6EAid4ly5bPKxi2RnvU+pRrkRlT5/2GwSIjVx
+0DTFT9je4Zp8/eCsTG+jq4mtpTyL3EklooMHpWZDQeZGY7xxMgcasvaw2+PPrJZgIjRfdfG2OLnF
+r4xg/cFUfVnVYphkoHxtDPYLzWGOP+vJxZP+iVpQSqEwTnw00syd91D7rIkzm4QqkZ23lDmaPhV9
+vOqPQzY28nRHYVpb1FitjNsQSLyP/Xtx58Ew5MJsL2TUn2veL6E2iQCoZonjTJEMjKgcoMFS6fNc
+zFUXqdt91Csd/yt8MGiZ3vDuDyDpjTtohSM645fxX2eBK+Hc4bwbxnumAlqE0r1O5nJwTSkfmlHG
+tuYnYZfEd008TIMNHaF1MIABX68Q6I4jBLT6VE5vSHixCMoOKWJ2np5B/jRytt8LHnoFiCPxwAoA
+7ddQgm++iwdE+Fuu/HT8qJXH6mERDg+HcSoO8EOjPiwoNPHR4WPz6rlppr9e5efhSsWfbtok9Del
+DqNlyzGKqSFWeBF3jWb8HByeY40BKAhEgs4Ef0AkhbhTUudm9qajCN4kHDSWtw2cvUtsNoTMeJ5B
+MqExkIAzINw2NFyS61qLivldaPA0iKs4BzpCXyiK2LAJJvvCbaOhrBefy3S8meCDNHozgKIdREga
+zi82Hwyymt9DdhqLl+Q/Js6wqTcyok9Rq/f1lvZGbQzpOLgzjx/l2t2UrPcEDiwVO/ePz0nEyJ+J
+QPNdixgvKYYrrU6q2aqsjDKPgmg8wE27LlaKJjXeTfDFfB6nZW76EwoKDKDAnvlLRS34f5Yv7gp/
+bDhHmqJPhvtvPELvKNBNPbVreXJh5kdIWjKsc5OeBnYMgNMMWYwCbXA25Z1gZy0VoFXGXa1QfYtG
++igx6XgMgBJo+lXkraXnV3bi6btCOxrAvkp4lu68b/4hY/tacDVamGbU9oi6sL89CPAM+qsBqhs1
+ZSYnGvLzDrWFYPXHhaYtZcP0n7sQRUoXd4pQhtmVChSUm2i64SzpErjz8tiK/o0pHiy5imZotxE5
+EQqlW27fcaMOHqv6hsAlWfwjDrl89IvZHfEKx9PiCRcBBof445rHTToDhGgi7r+dXx+sZ5E/PIZO
+LYvwvvXPc8CmYL6yMJH3nlZLYFtHQ/w0b2jiwhrXJ5J+0rKVfXFRCcJD4K86I8hSmZU4L09gT02d
+kXTV291CpYzp5Pu6MbPiea9rdB369Z6JRv+NCh7SSoZND/5XUnM/3m4gk6PDAoVefoUXqcgsg9M0
+zYR2mLB4vuaxxQfO0eTzlSjKxKB9ZQd2dIOIIfi8jNIoYrPiPc9lsYU+bRA1iv9hH2SyDoJ3/x7U
+bgY5/0GjtWnQUm19FAuWZ8KrpopMl+ROjH80ZW2tI5j0NJ6n5+1jTzxe08q/yT0Hbe7xQfsX0nrL
+sUua5asQV0ppY0XANh7fuptGOK2QMi+UcVG1hHfOz6D5fesiiC8w9qrs/MuCAD4xbHoHbYQ+y4dm
+2cjGT5OBg1xH0nmtxtUyGmRgY+XW+Vuq991DNzh33Xv3S9xPi38XrpJDCivC0HnZSRW8OvLKWt+q
+JC4alleD+qTcD0dkQj62IIKEJwfnzH1qQEUfv7qT/q2GMIDGpEmy8Ch+CJfmLgYgQOGQ0ushTh+W
+JEaTzx3ZabZsP5TyBVGt+YzQgCg/bEpCaThgvLe/LL39Dfm5Ued7dXBSML1aBlcazd3CtAjE/NVC
+/+tJLGzgP0ADO+C/4QOcQQrK/8ANSr5/k4f/ZvgPuEH5D7hB+Q+4QUWonPd6jgIr6v+HG0T/OlrH
+Vv/NKMHtJfZKbP7AFcFgIL7U4IagHQsT0jxHEfzicVH1v+DGBMVGXo4DJ+j4Gx/H05Jkqp7hvsGh
+ne+XJjiFzrroJqUjL5u+d+VOOflk4c3SaomxEIQlDmuAbrJblk1KEyk++FWZIDY9tvg3lCUXcjA+
+BW42GlylNTcPd0na2S5nsAGb1mlxkCcPSRq2/Jbk78ALm4qUC5+R3zrnJxzd6jgAPul3XmD3qKLj
+TF/32GFj0HpwFZSYTQI9GSIhpI4sLXM9QURPoN5vh2/XKRtGINS8cwUCyp8wBqN/7KC9XZ86LerF
+cZSvg5PUWb/9RjdzzlqSrDs2sXHYH+DfEAPkWxWbTG2AokQRxRylQcJ7f2vMfthRiJlu46eNlC/Q
+bjMG42hs4ZFo68jxm3qLj22LlAc45w3me9r5v6nNefOwdg6E5rJMNPisBVis0/J1KgjxDp8JLi05
+BJBKaTbTbEgjQTNvLrOf+Fl6Gw0NEGAAfmM5qOzvphTPwY+UuWpAliqBkCF76Sjk4ZU9FxIDQJiK
+ohX5ARS7odLOz6baZgqnLyF0ZlZ7wbdtlNRD/L8gJGh+vswwj/CDNhbNRQEpzcKmhNct3AvnDxYj
+1HjIBqg+r/3yAbSgI3cOOFwWz6ZbtnEcM9TJ80Mt882msmdw6MzwVBg1kqmrpaMzhcehGyulDcl6
+zKu15FOW3Y0bphuai/7WAUFJ7HMNFQ8c9ONqp9q/Qxdx+XMueDcNmO2y6xWq7iyCw2crXkZAdnzp
+6OXiByDcc6XAP53fpj5hAC2lxNRW2Af0VzSC0azghfDJmy26Xvww93GHv8HA/O1eCPmVxsMrMNxi
+26WY4a7tsk+4GANIMnqphNt4pVzNxaFH3KnO+4HLKuH5SU0123QCR8KJ0IRdzYea6W8Sgd15p+1m
+tnZTLrFbDxNd0w49PBWgC+psAeiT0TqKLMnRCG2jxMe343fuOtPMc/u6E3QoPkdtyTh+Cffz1XOr
+kECbX4FHsKu+STRTyWkD44uychWSke8m2Bfh5tIJ6iBN8jvLI/IbT+lv9h5N5CP6bAlqsNZXJRRW
+Q0kejesz567vTCR3o8LNsU2/Z93Va+hvIuCmoXBJyv5s7V4gc/F862kLjoPCgpsiiXcx7VKCdi4r
+7wU+ZesGdAWntx+hgCjQtbJXseISs7IRqsOK9FomQ1rG/mTC1/7irbFjzIU7AvScy4f7nvTljmMA
+8dR8RxemRkrKKOsHjXrvB+FU9qQ8VoC5RcW6Y6btUsWTPQr4u+Dv2NhpwkW4XhwEvLLHLZIRhFWx
+HY4lbNsvBq80YoI4az8zCGSs8V7BEy9/gSRnxYKxxvRLw07n6gPweQMxAH9HHc/4ByuMJOeMARhA
+LHMBpXyitB03LDmLPQ9lzVD1NUon+KZugJhc4yl7hiIfRQaPxvx6073dtS2//f8E31DSPXi068bX
+0udKrdrjanVk9lR2s3eNbQkmSpmBpU5o2UeLMb4qHT9IqrQd6REH6cWZI3+ioR86o9K3rcB/zOPD
+axo4NqGd5Bv4g3yD5SijP1gk8L9vAeBU+6d8r8AnuOsWwe+IAdAflNCJq/Lt1DoAr2njgrq5fULE
+SeKHv7pwLcRvk7V7BBvrtrn3nW6hDmJxfcYV6w2iol6cljRJDRyPI5QrEHWBS3VcV41E9L9soH62
+WjCJno9UtHk/fZOmja1M+h4SAXb6rpwZnBi5XXns8dajfKzfVqp7UBiMUpC0ThLKYVfvsstV1vJh
+AdXiKuNqc0Vi4jkjwbkJweJtLVw+iWj+9OKmVMgLdvSHVFm3pG/i+z0wjp9ngijhPQ/GX/H9L3OG
+83drZ4pESsduNkgiOJeeU/I8OAUN6ZBzgx6nA/pFaB9HnUMmL8qG/mbKgAi1fhYLVo/r7UaQex/Z
+rO33qPAdiTUvjUq/7jjud7zA4QkvskzyR3ofEGlRK7DXIw6zaImKNFB/T9pL6nNeo1rnFO8ihw/n
+NbbD4htWN+EjpW/qQsbyRnAqaaDMU791VMrkR8P4qvoz1iXkDQI0ADyFJKUYAKn1Nvvk3Mf9yulZ
+DEBmYgO8aKVSZfMu5oY1sw68OGbLNZEHPhvs5bOqu7N47moE+FZsSaXCBQpdCKl5y5And6USc+KG
+C+/tyJEc7hWsRSp9yVXdoLD4memVl9NUHT9OjT8GkrD1JSW7gosiLCzEjUrLrbPaP9c1J4pmacXg
+phOUXwRXpcY3MOSVGojylk7k/O3Qzht+apmQiF3IltjKsQAwgI5xAxWrFL4Dn09DfDIakG9X2jQO
+/F9H3Q3lGjAAPGt0DRREsx3zgZOCxH841KqgWj2WpDcu7H0KtTNDG+s5vwx+46KxR0qbdT4rj/ut
+6yQH99Gjw8WsyDiTNCm5bLYJXxsGRGCACvCx11AQmxo4UYMz/641UCIzYVydKyXwRnFWZ9aIaUkO
+A3jN/DpSuySb4/g7f0ZqwGLUKZ3DMSTpio6SYghHftCXCm71XLTGjrb7fSOjsoxWbWY0YzrnB85w
+CTobLz8bu1O4fnVS2CXhOEi5TUTG1hsiYhIeFTaAXgNzYQDuPQVvLyF/Yg+B3QJXksCWCrUX6wbR
+DstnEIllgWuj/kE+nIoQO50BAlekf1VC7fSwhNG0hJVam+MwVd9T6x8rJC/9c8I8LZEzEX+GZo5+
+2eF3hNBrKfnhUlKeJhFOcFc2CFgdL9PiCVPxohuZsQ09T+rVDKnCJVKNcupMSIo1N7AsW/UyaI/N
+aqiFYk7paxA24Rydh8ZfYcZais34316Sq/SyclNGcq540w5Ks7jR+8RMPQheH6eMeON/WjeToCR/
+mdbNXJ9DpKvZKZL49sL4JKuanfNN3VYmi4xAGhVzkBv+FQE/+TYsMUejCvzpQcNUpKk0PKA6mkzD
+MkGDkUOkVPB6usY6M0QpdNGBbY3JoxYnxlyorQrM5sy1w17nvUq8b+RF2I5rG6S8wsG9PByDXNVR
+ImeXR9cd/Wl5cK7mhZq3qXpU6lAxH2xtBpKbxw5zAMT5ZsYfU9JKAMn6FumzoAIzNSy9a2yzOEoI
+YSfZ0AV1Fyi2BtqT5TOBUmyCiTNCSkzh2jtoCalqYewo+L4s8q7t1Fc9rfkw7awgkDlKD+X5eYG3
+ljW9YZ9hzQVxU5ehGFnas1K4safPos2l7IB8U8+e+H/a++64prZ07UBoQVpIqCKEXoU0QkIPHaR3
+FJEWmihNpErvvUlXqkgTECyIqID0Ik0UVJQOoiBVioAfzpk545yZezhnZn7nm3vH54/snb3eVZJ3
+P2uv510rK4IcMVV8sBm1T1P5jHWZsoKQXnz5qsygjkQZS4JzVyjFQP+B59klRP98OQFKlfv06aed
+aor0wWsGra6CQPv+/Uz26IuDwcfnOMxHFCdPN/dduEjCC8ycmZeVNmbuUiaQsfuAplrleUBmzZrS
+hq8kRY1TqYLwOfpoosj7Nx4EeOeaS1n7bJpezxdU4+0YDZ2UexiuPMgxO06iyPw5/nqisaUE+Al/
+Yg63RunpjZGoRccxEWVKrxmEQXlnblryvtbihEWaxM5jut6O6FIEMQUe+WGp2dPYfSXpZjPCeCD3
+LeglciBRElizfD9T03JUke6RHPe4fe+xD+z3t/DuFFocj9aXC8gi7XMYqjyuECN2KitlwXb+QU9a
+2qmWih/tPX8zoqJR3MW3XMyn4tbHD+MUEiXmgNxQqiKaDa1BWte/6k6mSaKFQEzdsETqKVltXBKV
+i19CMoR6oP55scmULTVNs9ullyj2bq5XWy41QXj39Bja94TO3RZLjN79CphUMjl3+wYVhRUUsOw1
+dbc/ny54IT3MsnmL+nQGYdYkYM55WhXTGy9piUjzLP80cvpOOmEM6c0l5VucmR+pSj68p+8fZJ0Y
+En1bfiEqXZNEYIirqcXUeYDd2F2MnpKtuy64zt05BOTA0rryNPkyUi3gJNmsvJrg+sHdqGL9dhEN
+9bK0HGsHt/thLVShVJv0VTg59YlFYp+sTauRTJKgEB4lPJ+FUCH6FgWR5bHW29J3eZuaKTJF/MoX
+7RXJ5QeDPuVdNuDxssnwUoIUrIy2SxHAUMt+9sCXJgj2vWPkzx6aVAbJc3K25kG06HnAERpbbcMd
+uWcFzjELJaVtFJ6ehQvamn+wG3VWhbVaK1b0KlDjWwP7qTnnaoJRYml1aQimZJo+eeqYzrD5LQLg
+pcWDnOaburLTvTfXjSVbGxP4/ccLLicl2FMUSftnmG6r4IPHKMzuuQKZOwCQKCMdD+WX4om+n7IL
+3xse0xMTORvQbbMmpAAXu162S8cWE7I/dKU1JwiXI9SDGrci6YqKXHjXZzNrR05bzlmbQBSlnW2e
+Psab09m/OleU00JMekIY3w0CT38YiVWwyHi5qf96ZtBTDGj5QceaxeBN2IqAlyb1wugZ5rxJNTtL
+JtAtkD2oi2240Jc4Ze2er+5LXb08+rXkFYPqmGKrj3yaHqlPFeWYWbVPafmRI1WU92azgNwqqHEq
+dTX6AO3CZJuFIsHAfmOTfJ2AuQpeLuKR3BiCvdru+bYIH/oDJzonkk8j1gKBd4PlgIvXBCvUQsjf
+G+4Up2ZzTk9u2svGPNp+pSufm3s7EHgXktZL0nrizlZNN5aTRl02O8e9fGZIJyO7hEox3q+7BsWS
+vN1Q/R6AJL6RkDFMh9fbFpw6pwSazEeQcIOT/LMHyhjoScn9rg0+LMjFfQELRfLaZwl80Rcd4ZO3
+wLOt35DSjQZ18S/LlgPreJOlEqHyBflR2w0Fpfx6yQcL2DU+6fbNvm6tpzjI9M25kun1WAr0OfO+
+6JlZSLvNpqv8FKF5xow5LRXCA3o184L4JSyMbULm7mGPTpR+C69CEr96Wp6UDCTVtdoWalgGTiCd
+LLku4sUZkOiJCmbUeGGfXXzPlmIfhJHnIJNzuhPvM89eOyFF2tRA433gHFtKvUqqRTHffFuO6Z0L
+RUaIeKG7za0gkOrVxLlo2mhL1kiRyOUi+xMfBbda/M+WXOkM+gqA5m8HNuv2jh8O48IFPjaHLdMq
+WRjtk22oVa1wh9jcCzJsGtokG2zSnKDT2LkeyMiWNhWS24HfqxgHhQ4FBQorBDD0cMLMGqX2BjMt
+7ydYt8sDMy77v0CuqyPuH4raGS6DDb2SE2E22iJCgDfqkRx5I9fIDOmLePir3dD5GgUw5uHHx+iK
+dNVXZmFdNHS0H6e+FKEZhatp3iqnT/Dq63M/jxnCN35++7KETbUNtgI9EU+0M1c3qY3fx1afBEho
+JxolCWLX1DaivtSrIV3PcyoW321ls2pNrDfw18nmm02KoqRp8a605WZUZFdaROalPxOORzXjBeEd
+qh1bCnjmZyFPrcCDKcGrZLnkZ+IoS+U9TIKwpQC82HXFqcIDmMgmLbRmYHrymdQ52YpjoJnuxp6D
+RxbH0u/weHd+nDO66UfpVsuxPqJfkZTCOTHCfe/azgJvL0V6zkBvZ7Mm89qmuSd3OI64fy77NJUG
+iN0zMxEjYwjAVpqpT4m4s6biSAuPZ3awkKzlDxcCX7ypusC8j90t7WtwszIz9W3cfTA7G5jYdhW2
+t5wleQKj1UuR55BVC5rO9ZXJt5PRfRIWRAfrsqkQeg9+SvMqWa+Z/C2aLQ5Mwts4YkKkyh/h72FA
+Q0RMTvnXCAMn4Ki4AbAFTP/XqAa94M8hh6MyfhfVoP8uqkH/XVQDov6Slamfjij6H/9Uh74U1sV7
+Y6A7u2cDFSKjT/m27lHbfdUsbQP917703RZLgPiSv0Q19FS7FdcVmW6aubP7vJMeVG/irNSA5PmN
+22Zu234FGPLgM2cebwnAHzioUpKvCE48F5VDsBfNOGrY7szso7Zca1YCy4tBc+GwLPuGYDZoMJdj
+dhOzCDDxsmW4eSDxOiOrU1e930XL65+yyR98ya8dZfa+nupRlQg0WF8dwuVaCmLor6yjn2GYQ7Ny
+r0hEDHIBu7dWJa4UmZPPKhhKURz29yFKm66nNWdtTgMMg2OdlXxSboKoHk71JnOBR6yipiAmDRbR
+cIQcrdaCF64z+sznzPHoG17hV8gsk6WbI0vr1grI10lDI0I07szGPD0+w6Gczf20sDjSc3VHsd4n
+kb1yI27hBSnx47WgT5veEy5iTCC1CdcvNemDiAldT1MQpFrLz549cJZj1b57WP6xjRp/5iua4ef0
+Ps8G6JPHaRZCpE907ldFP0ekTp1y5tlorm52n3jQaTrSvMlixNMZhrrGsn6KIkluObD8TQ4gkO95
+Ce+zVEPsrWMJZ8Rtr22YKiFSH5grgbfMB7YhUJ3XhLgFNBOFmpLaddj529HppzsYeNhiYLv1zupx
+JOGhhQVi3A1QSVnpq7Nm1+heySpiCnshUkWcIU9PnEwrrcGQZl7v1hXQgkgtrZ5VY8e7BcBuXi1N
+OHHqU+BrPBcj+R61wae8hXyYmtRcEJ82eSwvkBWY8CAGhgtRNxcss9R+RpVrnZ8uLxFJquomnrZI
+4K5DwK3drc82upWxbbWBsW4LxXi6dbSWbpVGs1bq7H6Ak2/sqe5R1ckbnIqwg0pPwMfFBkklMxH8
+thhvel1k9VwOAUxRFz+Aj+RXPD21JKh1pvnNAMPzh6v2s2U6hlHktAJP6hjCivdN7Ex3zieRPBAe
+BFK4SQb5bTXeCjrWtdP2cupeHInJQuBd+Zq7L6VUAycUaab3TISSrMo+AE6MBsUGk0dfc3pM+omn
+be8ki82mKdW6RChV+5ACDzhA4OR0eotakE++jWaqy9UTFwKYHbKvru2nCq7ONGqohMSmy+UCm3nR
+vMEUYCSp9kSXmmZNAN2O6KYCNsyKiP+ppnR2Uq6/4v1POfwXyOWReRNbprbwuuVik1BaiBfnCVZb
+8DV2v0IuzUoRWQNem2byCZEsJIewcaI8PAIsnGhm4XwtaFsu72pNEkUy6GopOdHLHQbwWq7QyKeO
+kdh7mYUB2gelhfLFtAprHdExYTc4GEAgXye/28iarsVGZ9dzaMIpkANet7AJL8W5q2XF5xoI8BgB
+xCdw+VxKKtTkiorRx696nbhMTOmubTQVnSpC59vL76JivWqTuRa27XOj3Pz2NplsYRYtjnbjDr0J
+l1/CW/093Z7RjxwGzQuxzTrhgbwf9UV1mgywtDGcqHQFWI1bpmVzQpnPGv5zls26rkpac8FHtf0v
+hXAl8R2KLwW1yRRw054RPJsd9e4+cdeordgHpSsvAylF8nI888byHgRU3k8QaF9N2l9KO20n80Kz
+Qqw/tqs1Xq/uBUHkWmFrlin/5zGSelgFSJNtEeIBz21muxoEc9itdKCFuxg2UluH8DmXRGBh0eNY
+ZZVM2kwqn/NTs6EpGpyDsCHu24s1Bc1RRKU67szbBXT06dIfeGS71nIgZMdBM8lvYe63eG7lM7DX
+2vWyqMGZml+fDmLUPuw93MtZVi68MwJ2caGhYY9PyxIJ8KfcsMTPpBikQAXo3JO3CwkqwXshteaz
+G8hqIjeBg4ISVaF+md72pWPYa5n7A7WoiLQoyvvxef0cl4QoguSpyKeEshrsP9a2+uxXLUVdoNO3
+ZuYY3FLpN1A7IPkKQEft5K8s3e5FEdM2nxnicY85iGOChYvduoWI7KEIbKhh8qj9bBphAycX262D
+omCY0Bp6Qe+MonHWOd7x+mmhlzzV6Amj2ytQ2bPEWjaQe8Uc1EFpjMxkNM8fa4qE7xbYQXdq1gNP
+paNFHMznv5g9nUS9zNYsJmUHCn7WmSuc8KwPHQAuLl0XIjk5LwQDsYhXqpUtxLSvoM5aNz2JKbZV
+TauVHlxyTHP2Ve1Ts1+CsusQ0p3LqvNshhVOraYcOA7ZqeukiMerkvjCTJZ4zOV87V/U6/R0jcvz
+KaV6PTwZZVNQdqyfs8Nzcj1drAm/48DFUCC3b6jLOW6zOE8ljdMcNEyIEGqhR1LYnzsvOSPXZjZV
+G6naeIsazO9EsIWF7bAIP6aUUq3/Uobrmp169FYEe2dTr4RYmKSNz+Ruj3jJfAb+i2B2dwKP4o7D
+IMUV8SD5znBpBO3oc+q5BlVtGk/291a5rprMfB/6p3rpNbU5no0t63bQb9q250VOPVX6uF383qvr
+Hr2haAnI0oV4UFGtt5GonekeSSM0+isAcOUrgN4fHXwJx8MZnQNsfi1kHd9ERQvPczYBDIrEJR2U
+g8yGEsdUPfWDRpUDjzV7fuTmpapuQLalPKVtw3NN2ympkc8Ehtq2LLrfUwGTYLgw7jzrEnfj+WA6
+KFyeZxJSX+99jKPfVwCBrlsEvG7hwCy5NxpfdDJqaeg5C74YVh6lSuw4T59qPS/83r1JKpg5NK9s
+b2wQbIwzlenscJxUb9KxGdKaEZTv6Oryw2SMFeCTseCP+UgI/bm76SWVce3eNpcfEHsznyh6uzp1
+B3zqIcy1cBtcw9Be1HyNnKgJKydudgvq7DZcGDnlGLSB108V2i4Bgy+beM5Gh9nqE6np8n2Q7vU6
+xeLfPxIUtXLTpn/5S41AVLDIbU1OaVod1oMq32B+jtWJW+/4mlexe2bzIQGpzcoCHM5+2+0Tzd2+
+gZ91yuMw0QJyuzVVQ2tfAXQZL1/1XYQBH6v7++yd1SikoT/x1v6S1eLOsD5J0ZR2wGUqzQInG5n7
+bdiByop9IyWZXMj+IA4Yg/oKcDixAqAlHnzPwHlmu/OsRd6Nqjcyw0mz7SZhSnMFTy5Bb0CwwYM8
+tG6yokMhO86l4otJEmLCktA8UjIuB5/u9jA9ADDuVrpkOc4ilj+Hu82n4srSLKUWKzwKsVPiQN5Y
+RmcmWKr9sZ53y5L+HcZ74POYLidbD8Sao8WUFM3fzG+vFSy/Nu4nfZUlmxE8gZURt3S2hAxeG5t2
+R6lPYff1c2wvSD+Z/NDmoMYVxQkOS8jqzAfOqmLko3lUOfYK57vl7tBN+T8HqQTxybeX0yeZSrwZ
+YqUrZtFl7z9RUb1UzBNGt84svpLsDXk4Dg3sc2rn6GIL0MCzYILpv3hn14Ncm3JFwzyX7AdL8LNt
+tOT0MgbMQ6ngpPlyZpkONhRkLS9GfiQJMUYRte3Ee9FHLNXCOU4neMdzt77kHu6dFuVZH4IcaWrG
+k0sUyTHK3bI+JXFOvsW9UYu0Ds4exy3W9MXeQU+CpRXX6f2J1nUiyDQr+ZJG08IuqJ9v9xOYsacp
+S5Ynkrt3OfxexEirUppcanZWE6a5lu5wuB/1FeCEBfHt3uoN4zRcI18sl/R5bq0KGvOQ82Pi5J99
+kOCycQztz/WsoiKZpAsaWywDKrWOUkpVcc1rgHpPRa+K90A1LGBPqC/eeHYtcfEBeeTeQDRLHjBm
+HtfaqEXm7iZXcez+1F0jHbE2DccQ70K7tZc8n7/4mzeWJZmu4iHHS/VcTQtJUsfkR2Ic5N6NmDwE
+aOSB6Uc0CEJobXew5FVC50Lralen2Ih7okph8KzjbIp3w5T5h+WMh027JXTG+HV5N7svBYsN8kwR
+uBJgHl1gwxlQ1FQz8Sx/46upmkzc8Y7i57vVrGkLJqpjaH22rwBsDat9ggA5i+qjY5NF9mm4oNpM
+omlLRxsafpKzPWLGomkbugsrwtvHqCytJl1Y++Kq4QKqVI+FE7d5Kj+48WuOzwkcp7Jy8N7EN1Fe
+nSlKZ+eLo2ABh9Xu4AE7qZofGzijAXm6KVVMUZlvT3XIxfaLPWdQKvRzTLLuKSYK93tIoLvEMDOc
+a7ImQN6yItw428YWpoUIiaBs3b6t12epQv6wS4sUVnSV7PQ9d3NyC8GbLhfCQ98YGlOT1ybGyHrE
+zry/1Xn9wx6mNywy/INY6oEhtAPevGJ6SqhmPoAuDySjC0GT0rSASpBmveCOyXJiFvLOdeHIWqlq
+PgfXSPJgKsX6G+cARHINziT8ikQvzO9x04SI1Bh4RVMcGF8Z51js6miRzHf1IVKTRTk54emwxNPa
+CyJ0/MzjxauXNbijnHniVTNX1YgsrNf6MQ52eDjOUeWkKEiusA2mPVPnwn5tv7imKv/uFsdiD7t0
+7eMNaM5n+8XekOzQDZfFrMqvgDa02XE7xU1dP7+iPTVaHZFxv9gPH4t2K1napDhmQqywy+9zlM1I
+pMEjKbLvpb8CtlSGWbzv2a4AqZueyB177HANnQ8+kWToqXtbw/hse9wN4guU1fVK7Gr1VOSPATxd
+1+5GzTWd5AKD2pVZ1xyLGI1p8wPLDWLsVAMWEozvzEdFtPsoPG41MzzX1Te5Ixw279cY3DcUV401
+ouKezs5nIwpk/+SzbyB5EBLDIcJel8Oa8kklf9wwQ4y7WQPGGRXC7PAps9CZMwoY0tTRxervfXNo
+Acc/DAo4Jem9jbtiOyIc1mjcuP4VQCJLfR7ECt8pNTNouDOuroqnJJaX2Rh24o9Q2dM3r2AIf7lR
+z8en75nGHpcaZH0lcnmaUGiDF1ZiDzcf4wRDUgoecrldPRe6Ka7RSLJS0GaQu/dGYN+NlrycKiKh
+w6nMX7QLQmmzpOFG/xwa+bzNsA4jnfUYulNEjtaRNqVPygie/gpwNpSIZ1inZQAryvoPNV59CjCa
+SNmaqqfraDjLw5RGXteWNOBjXQWNWDXgFUlNxjtXh7ZlNFsjTtdwbckbV+jsBbDa3zCd3XVWt5SK
+yg8cq16l9stcddVaNNaLNYSQjl7lZ1YFTOfNZTNrk8iwd3vyqGm+UTH4cnOFk+f68D0jF6cgTTb6
+8DQlelCWal+COxEfN7vmDSqaSykp9b2gp+uemk5XjOMKbTbqPdiX214FJ7qfk35cRaWdoR3wEVQ+
+s1NCkstvivriMNMnmX4/fYq/KSEGVyZqsAh+faM1wWZNZzamLOkxK5tdsLQh4GaamBj/yQ46mAgg
+xvm8uhjD6Hycpqz0y6j7UYGF2WdwuQHku1UncyPnRs/cQinnRQpO9RMxfYyIQJy+um0XRXo9eEuR
++KJjAGsVobNW95kDsbfXpvhM0sQa9RpTsGIMsWka/LFqOS7INJM0wEnOPk7mnvpnpRGLrIz5MzCW
+6DwBTtUXzCX00s+mZHPyXQnXE3uL4iP7PaeIMymDmgD01UFLZa+JkLv4smhIfXp16nzHxAMBALDs
+eBLiVdmMZBP7qwQKkExFuvNnR95Wt+BP7xomVLhXvdSkW3tIXsBTQ239vHgTmW2TpPvdlqNF1GJI
+k80zeWjvC0ntRDLPSPMfPgCtM90rGQFw5DTvTlajKRPRUz35PD2w+1dAbTMje1kxWbvYQ2EEsoT+
+adplrMiwjWxXmiyrnmI6w9ZBvG/7RCRqIDgEdAMXUAlrYqr5fnsN+m/baxwRNwC20EP+GtWA/Ly9
+Bv1RGb+LakC+i2pAvotqQOtVMNr9dERJ/ziqASmFTQJmHZexjVqL7SuemcQxqxbn38U/L1B6Ricj
+k1SHeDPxl6hGafh8scvyLrcTYG4RClil6gnTCLbESZbSha1DLiUr0lTb84rR9NhPfsjBphoZXZWu
+eHVfQvgT1P44SuV6QcFMMWIXQ5/rCAaWVt3u6jH77P/SPVrAZddY1pmxfVRaKy3+4tqpRPxO9f2v
+gPAVGUcvUX2RsC3oqaHr5hl3qtjLh4iyaBaAO85wvTCma/z3becDzpOSDyrzvUkuo8UcjsAn1ei8
+ltMFiGUS8/brYKDxme2KlHckPfumqueCu/UpYBzlZCC6npFtf+lsL2b8/Yj6WcCU5PlcznTslo6H
+WtfE+z1DcGtdsCJQPIu+bjGeaYB+fuzpxpbh1rq1HuwAauarDY7+/KQCGYgp1JshiPVAzOka+Cyv
+Xxn7XJ9MbJmgGngZxYrWhnt2H3sgaRh8DWOeC2wzmzWUOzGK76ap0HxcTRR/04Kyc1UpcUamZ86b
+htTjlfZXgFEQd75cbaCnqnzYE71G6b0+Ifkt2o+6BRl3dZ7K69w4eOv8yZbzK6DiUV84x1z2hxU+
+OlkDm5E011DREDIzafbtm2eaBk9w7tJvLxcaN4pvEdnA0Mmy4QfCiQp4RezJyDQFnmAAd2zfPZBb
+6ORXwBLJOsUcodOvnBpgpDaN3eyQ0f9EVQTQpD/V74nHGkm00l7hUpJ+aTpwLkeMm87q4T1QGjI5
+IwSARXT0cXEkgumEEXGZos1NAsfbHuAqA6FiQicYloWA5C6xJFmFQeGR6h3reo3QTfLErryYezJ5
+sZc+1vLAbMWqD85Iom17pc1dP4/JXnyed9xyAarTwnxBooy2/8C4V4iqbqcaxj/Cw3kQyngrHH84
+/u+0zhBkeRRRXStGTJbzGbyJJwcCJmroNFdNoXdHON4wafX6mx50BysmVXJc6gApcm8zRXJyp3ZW
+0oacF/PyJEk5zTsaLb9YF4NLpUpB2i/GXOZjzXMzUnj1wLiilcyvJqfDqB/SJ1/XZ1BnUvrgo1iX
+aWsxgl6WA4pPu2wUkhrkZdPKwqizdZ5AJeG5Lddg9GbIzsuQT3AqOOp6Qim1YZYL5C5lYx/dlHGc
+YIuuRwLHVLCHIxP85YLsMvAeG6/JG7Mg2VJPy67qFL6YTDcFE6O4pJIZ3jhe/vieIKWgT10f5x2S
+mu1VscPe1UXlAEtkRHurbw3lAMGv4sliFqR3Xy9bzTp0JRGEby5kxK989N7PvJ/W6v9xDMbbmSkS
+PIyCrKu9S7LlfzP4nlIwame+3XZIdDWVTj6iOpas9HNWomGnAuT96baJAivo1Z1yVpv4dAjH+Nll
+mPAJwW2c8GQuV1Cm14xvIuS0FrYzLbrm1PD+oHz35Lp++daTNT7JIljGcok7Z+8LNvzTp+rNAf0P
+dECpN7sMak58NEwMi/ggqnOJRo0TpEMf1867vqMUMaGzmxE2sr5zE/r20+TZUqWg0P0BdcFJqLW/
+VEv5xikOcUZu3qiMz4JBemHWX3KY33Qc362M9PQxpbVrAa7168mYMdCeR60vz9VRxz7P3DOF+AU+
+DJNygZl2Inxl/F/geTLzidGau68uknv0AXp9s4O8vhw4CdZc4jKtFqfUEAgjmYlxTqVg2N8fK6l3
+h6HYng8lUOMTLSyd3EPPA2/XhGT6iG/3x5TzhebLjO1kP8jF749kBUKYyE1Ud4XpbUmus78jDeYz
+T97A5WSl60rMymGUTIJknu6VcVJFqezFHRw7GaYyvZTYxldEnDdIqBLucG+tohkk57PtJwTZxtJP
+FIypP15IDs9U6ipTZt1IgYwNvxq4i66xcAUn60Cgp9AUenmkr+QZ1zifr+n0GRrjxWjeueEduQiy
+ByJL8hNYyu0mVyPVgcSEZb1dIRFfp93Hn+COVhPe/ob9kGHfmW6zvnAgqU13G9GhK2fKfGStmxAQ
+25hNRchY7dSz3FkFztEehkayPs0aNp6yS0Uc59YOztucYf0KAIuvYWKlp9UWXt2k32O0tg6oHAK2
+nWPQkXpHPSVABFwW6gmS5/HdHAci9Wkf5b5dNOMyWra5WE85La3a8H6Ys8Vv7XogLVUQUlrRij12
+qS01M9/OOoDR+/3WO086KgyAJZ49pFcPIAmhX5t0bTHSA9Ki3/BGZareXqwSrMtW3Dm25fs8Ti+M
+hn717LChndYnwRmv4XMMJMSSsfy55RYPGXlU+K6wd2/Yn89MDpo5sTfko/kZYyQRIkyt2v2kkPMa
+pOWybwmF/8S5C5NLN/WVk2EZgAMNfUV3sQVBq2fDrMNLEhO3CVPrgZ7e08MjszsK76jkaK251OKZ
+GaxA23Or14nP7q3eilHspficevfcZc2780UWesdbx+3pZK+8uyvwFcC/bkn/nC9C26dAJ2mERwPg
+b/pU8XVb2/ZA4YQ69vhrVDPtOfu5mK20JCnJqHitHFw2yTFoQgY+D5vO0xpOS8QeDZ4fIFrRDn++
+MsuahjuXFfm5wNXXIBXY8URWcIyHyNGQhUMdodcbLaqkJD+212NXpOYQCHfwS3uVhJsP3eDoO/us
+SezBR9FuFUgjZKZGUvJAuoHzXJ9NlfOo0pgb4irU09DVfKp49QorEROariwMNEur+kAzzVRwvkIs
+s5IxhlO1hSNLXy84CKZ74nHTTR+qO8fa1GQAVGsyIvosNJTcXSf6z++WX4FxCto3RS9MCCQv3dJ8
+NHYx97FgDMu5onJLl7Thy8tOZNs1lh+Ln/Uqh1RxtIQSk/c5w3bqpSWtrz7FpenwmGJPGfBjSpUF
+z59zugaykzahmI1NcwKOx6YyKeZNvR4g7EwttfUzcuT0cpaz5a+n8TxfZ2T7YMKsEytTsN0jcVnN
+nsBeEOHCdYOihd0/yqX3BY9S3COgig6GM73nDJPV+r6zH1YZur2xddDFnl4eKB7cvjYc6XOLstbN
+lyx0bX6x4RGLb+CGIyWbcKrp9YVwGlMU+EPf8dQ0mzzCe9jN1qJNVe3jm7YKCd1cFdqEW/h3e2SQ
+ccWDq5IBgvxsNTVZFPQaqT5zPcQc1Rlf6gSYwirfMFQV8d7Delie4AR+HmaUl6rs7CQDDc9szC62
+EPwSD8bm+xbAL9NBd+i632SWRqY2XFRgHXoA0Hz0pAGbbkKeGimi3Ew+xjHCw8FLpwx8Si2jjMUr
+cDRUjU7ybeN4zltf91GUM9Ki6wRQSrvrNMi9pQIKUumbTucMLuGJq0XitsUGwlJp7J8wuzpD1GlD
+oq6K8WzP4erm0vURw5GQPu1n8W85d4VCvPLig7Bwjdw+q8ov3TJ3Ej7uGaZlM7yZeWF+wYJPePDV
+TppvbePg9UX7bCbzx9JizFXT9kwpdTxnA70CL/POxsVELmWbSYF5uAd71t4eEzGXFGm7XPnoPruL
+vvPuql0V4uDqy7hGO5/HL6z6WOXJLi8DeTnHz0B8om/TAQcNU5WOjXysXGPnTx1JLxQ59a68FY0G
+b4mr6W77PqK4YMuuIjI5PdXGV72Yf+pBGR0C/uJzcWBKYu32mdSNEQiV33J03ckzx13fzQUD0nu3
+/UcNDkc/B+BLV1+pZmazO2WOUkq9pkaoC8q1WYV5KlxR0y9xXWHStnFS0XMlE+8VzMBnBGvce4Ta
+5pgT5KHLWE0MJTS2hL1QX+0d1XvUHDk+5UE40atYzacTIIjegbwmsLT4f3LZzQd1S/OXF+mPBqTR
+tgbpS1o4Vd5uJn1IRHIZf/aUFKPN5rkeG2u5qa8AjT3drHU9OnjH+Eqs4nT57NLTqeLTBXrE3Bk2
+Sq/pQbS3Z+ZXNXgZWiuWM9zK62uG2yYmT6CyQwEDZqxTjHhv2+lNMyFGLenoRZG0KTeTbL5W9ife
+wzR6TdwRd29zf/4K8JSPlN9/0ZXabuB/54ULCDqf0pVJz8WU8bp/nFmNuo0swTunoijUcQOyerqA
+C38KF+dfvIpvXcPTIKd76a6Y5G43sOWWPhC4fC1wic1NxbVo8oD+7rMY0b6VsxX9ElolVBV10xy8
+QRyy78Vv4WJ4ekJ6LndsNM4/jLfh00gl23LlXusfyZPv5CBnmNVkbUev6rDLjJg2ArqNJnQFSni2
+CIivgLsdKdAADmjeZctVM6OVOt3BYkbLlFanfuMrFtZsu3K9BZlktKTyTLcvJ7DJXqwtuCYj43tL
+XELCa7uBnH9jhMVX1slojJS6aNDEhhrc02qhg1Jg0rqWgum7LNjflufgmgMNyX+oKo4SGwkKvlpf
+p+seIBQLJZEygF5HDtPchj3ZGu99cI3adcci8vFWqrpqbPCmexRsw/dLQUpB03vcfrr54KBd/MJV
+YmZ+DTZmogN24RUOxnPhn+bb8HCPGsaRyHVt3Fzro3VDyEgvxP54X5A6Qt8bFK5LU6WBvu36NrDG
+pQ9AqfJshZaYzs8vpyyjIQmKu2gSAC2jOD7O0/WI3SklS0KumhGv1kRM6CSJO/XomF1vUiRERqjW
+K3fWaNOjh/c8yKdBsc/CXtwuIAellfD+ul0aCKu65hDWteqWf4kWWsU9q86rrpC/RsJ2UJ4k8IKD
+EEqFE5QkRoZQ8cCu03mA36wr9yKpgLY0ensDr8kLEWzDN0m6qoN2tj5M9bdT7fh8qRE4qH5c3Dlq
+4VP9FcCx9mXEunsvQ2WWjLBhJMPU43qMqE9zgHXUWmmn3ohcQhNlEMT8JLDPQ/rEV4Ax6WvnU9vZ
+740hWzrmyoacQIpuycj3+hqswhUMZAEv7MRITUu6V8zl1hlbC0JNRtwBWN2xdMnbAU91u+ZPVQxH
+jqw+iRWQqtHYCCiLimtYt/8KGDlXHs9Iphgsl7fYCnlq6P7WuG+XrCiCe8w2aK4i2VigP1h1lOMy
+d9wwUtr79nMu6S07DIdnaFB/7DOBz33ItvqMHC46OQKvZs4n1dvQyVYELUjRgTDHrjz/WVY4l94v
+4GPnTOe4vvsFCsox13Uldc3nile/SJn2UfmY1bRysZIDwqKk5trIRyYaSDcvfQiXdx+B06IpxCiJ
+RVFXoXxWzFWs0xPlNOzGRELXRSA6F57yNxGe64bqNwSSEG+xGT+kk/8Stp9Q0VsshMtLAZBTTjnd
+Fj6VQOIdxracWXsrhfUA+n5uD/oVAG2RfpJ9LKOsAJh+7l7fdPLekLUs/4LhaxLNQvawQAxDwYck
+Eh2VTzkS6pREHOX9guyY8/wxD8+S8gy5QEUWx0iJZlDJcWB9RRqSbrD0SyFSdsPmff2n0ktrFaqe
+Co7k51YKRI8xpJWGiRop9DxUK2C40TdBb/a+h/HwUarFH3NsdDgmmpyKu0TsId3TB2dosDH1e+Y8
+cx8Z69SUGPo4d9wkrDRCLVykNy2zFKOm7nM/duJL+ETtrg9hXRvmD9W8TmkIajS+O2YLStU6FwOL
+cTv56r64aorCQzEjcK9+POPm3guV+W2oAMz+YdN1/cb1W6bO01MPMkvfp3OdzReJkB6Aw4p9a9Wu
+FYKnyh6pDC30zSgulu2Qs1DNcqLOcguTi+0UwZDeFUq0UrMrV9xWDawzNTFFPNJtucEkV+52eG4t
+Nr2lLgOyqM2Xq6Cm6lJPu9oGfh/VgBBxAo6KGwBbINC/RjWgP0c1IEdl/C6qAf0uqgH9LqrBEFR1
+fEMMSJTxjzcNhUaR8qMR1RUpz+/NXLLzeCG8foa2QgB6bFLKizrQ7MuF9RA9LdhPWVEEz1oDh/ab
+gSpGF21OnYY4P0x/efak7Uz9iddl784mlizQL/J9AJbR663SXtTyG/XHV0AeBlxs/hJahOIba7Zs
+OlNsxmMW4ZxdbVw7T+zZ/CTBbJMxHCpxPjZuTaNTMqbTifiqz/uzVajUKzbDJVtjCX6zj6QSJN/3
+vJ9+MBxPcbwz0QN8s95GTswpii9qd/NzWMEA863WXeuqAkyR3Er1NDtFQ8QJ6q3nwcde571yo2AI
+5YoiymiM7aab/Kw88vLgnLl/bNxQmLfap03qenMuWxstw/Hc/sqXV07vvltuCfEBKF3U5thv8K6t
+L+Xe3Fy0d5tlE02IUcfhRPoA8Avh5fPLX29agcm7Of0/PIisSPV3veX1Ca99usejVsZWK+bTTSgz
+uwYFnacCnahsIN/5BzjaoizW/oiPvjZ6FXdqLmXkHkQe9X/GR/0z0FF77B71c6WjFv4ctZfLUSG2
+o27WI3fA/W5TGugha466L4EtUIa/sobh501poEdl/I41DN+xhuE71jC2nvjoHEJEdP0fs4bhNZF9
+qWXIMk1B45XRMaOH7NABHauR03PFdX4yxOlFZRbKGnV/NvXjIly85O5I8OCSPOPH5WDl4cAlyYWU
+QKFRCCs7K2sc0hZtjSGgJdAYjATKFoO0tkOJW8MPU5BYBBpuTUCgrCUk4LYSduI4O2uMrR0KixRH
+cIlw2bhcvHRYsqGPK+GwQMcLVvYEMSdXgv1h0gUX28NraCRchMvD0ffwVAKFhYv/6fq5S44Xvtkj
+4QjcSTjqJAJpiEBJohCScLQw/PAVznXl7JXvHcFw6IijPiqwhYHxr45g/NkRDEdl/M4RjN85gvE7
+RzABfuA3wcPLyv2CmLWv70m4lRVCHAdH4uysECg7Oyvs4dEGY2dnhyFY2xDsrGwIOJwtxgotZuPg
+efG8h5gWXltdRdnA8CT8T/iVOg5TMWg07NtRAiP+pyMc+dP7n4DBwBAoNBohcXiPiovDDhMl4GgA
+7NfK/LfB0+OSlfthU5ysLrp4/IrdoZmd3a+k//RJYD8f/5egHm6owgUgImJ1JlwmONtaiyr4XCJ4
+OXoQFF0uuFq5W11ycScGAIlIACXJHRRkACJiIiAxydHF/sD/Evzz/NfUUTz12+o4kv+HfPlb/qPQ
+EuI/+P9H4J/3v6KRvr6ytuFvqONI/yN/6X80XALxw/9/BH7xFKc8OscP/F/Cv9L/q/7GOo7iPwaD
+/pn/SDj6T/0//Af//xDI/C1gWlbuMARSBPZN6cEEFJUNBWG/MKE8VH5oCUk0QlQCi8CKI2HOLvby
+zgR7wkVbmErLfW3PCyqOzgSYQcv9b0eDQyUJ0265r3yoZn1gii33FaxsfzpX+NO5grOLzXnYKULL
+/VMEH2V3dxd3mFLLfSV3F1dXwp8NNVvua34bnML0Wu4bENwOK4AZttw3PFSkys5Wrh4E2+9ahEOg
+JJAwW2t5F1fCRdi3F8eL9t+nix8KZthlgruHo8tF+UOfXvrW5jNnvzUXrvDtaPPt7fc5cDgs+luJ
+hzeI4+Fo+Js9EqZ6aP5XIywcgUFif67W1uUi4VsTcaISaLQ4DnHB40+m2EO1fHgFh0FIfDO1cXbx
+IMC+vf6liT8ZiMPh4vC/GvylMDRSQhSNQbTc9fi39tH/BP+/fWuEkx6H3wVB1NZa7KcnuOjhXfA/
+1XHk8x8l8Qv+i2MQmB/8/yPgtipFy3yo//789tuRiNSVcEgQkouezs7/P5v2A38A/nX+H60Df4/+
+g0t8G/+Lw9E/nv9/CP51/x+tA3+//hNHo37E//4Q/K3+Q/3Qf/9l+Hf0/0fpwCP4j8CgUb8Y/6El
+0Mgf/P8j8E/qP4ykOEoUdehPJO4/RP/91CIEShyD+4f678/p4uLiv1X//ZQDeTgigf/P+u/PRkgM
+Cv0P9N83aYPDHOq/f0lmwxFo+H+WzIYjvhH598jowxy4XxPmcARWAg5zcvF0v2jlLO9OsHE5LN3n
+W6mI762QSBz2763+fHJYIkz+e3OUuATq96l9LAKFRByh9r8ZYSX+ztsSOFHxbwERzN/IfXE4Cgf/
+dbmPwf0DuY/EYUSREuL/brn/d/jX+/9fjCD+QR1Hjv/EEb8Y/2GQ6B/jvz8EHo9EoEfN/xIDSX5M
++f4fxb9N/4laW53/H+r4/foPg5CA/+D/H4Ef83//3fiJ/xddbAkeP0XyEb8Wyf/ncBT/Ed/F///E
+f4Q4HCnxg/9/BIavYxiYfhH/J//ziJmIXr/zHUwSQET8farQoZSwcpYsOPuMGO5i3bVHdCDX0wr6
+QHS+D9PPOd/2SqL9q20oMFvSg+D2c7E/8J+K7/n/21d0/j4c+fz/5fpPhDhaHPWD/38Efqz//O/G
+9/z/7Ss6fx+O5P8v138i0D/0/x+E7/3/21d0/j78bv2HQGPQP/TfH4If+u+/G3/b///WFZ2/D0fx
+H3N48W/m/xBohPiP9R9/CP6ViSksHI1AYP6TJqawCLQE/FemmbAIHAaO+j0zQlgkCidx1IwQFoXB
+YP9+/ScSLopEYXEY9N/OCCEO2fCrM0IIBA739zNCCBRSFIX+ty8A/YEf+IH/Wvw/TzqIjQCQAQA=`
diff --git a/swarm/api/config.go b/swarm/api/config.go
index 0a7100c57..3a87488cc 100644
--- a/swarm/api/config.go
+++ b/swarm/api/config.go
@@ -45,7 +45,13 @@ const (
 type Config struct {
 	// serialised/persisted fields
 	*storage.FileStoreParams
-	*storage.LocalStoreParams
+
+	// LocalStore
+	ChunkDbPath   string
+	DbCapacity    uint64
+	CacheCapacity uint
+	BaseKey       []byte
+
 	*network.HiveParams
 	Swap                 *swap.LocalProfile
 	Pss                  *pss.PssParams
@@ -78,7 +84,6 @@ type Config struct {
 func NewConfig() (c *Config) {
 
 	c = &Config{
-		LocalStoreParams:     storage.NewDefaultLocalStoreParams(),
 		FileStoreParams:      storage.NewFileStoreParams(),
 		HiveParams:           network.NewHiveParams(),
 		Swap:                 swap.NewDefaultSwapParams(),
@@ -130,8 +135,9 @@ func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error
 		c.Swap.Init(c.Contract, prvKey)
 	}
 
-	c.LocalStoreParams.Init(c.Path)
-	c.LocalStoreParams.BaseKey = common.FromHex(c.BzzKey)
+	c.privateKey = prvKey
+	c.ChunkDbPath = filepath.Join(c.Path, "chunks")
+	c.BaseKey = common.FromHex(c.BzzKey)
 
 	c.Pss = c.Pss.WithPrivateKey(c.privateKey)
 	return nil
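
The removed storage.LocalStoreParams were consumed by storage.NewLocalStore, while the new Config fields feed localstore.New, whose signature (path, 32-byte base key, optional *localstore.Options) is visible in the test changes later in this patch. A minimal wiring sketch, not part of this change; the data directory and key are placeholders and error handling is reduced to a fatal log:

    package main

    import (
        "log"
        "path/filepath"

        "github.com/ethereum/go-ethereum/swarm/storage/localstore"
    )

    func main() {
        dataDir := "/tmp/swarm-example" // placeholder for Config.Path
        baseKey := make([]byte, 32)     // placeholder for common.FromHex(c.BzzKey)

        // Config.Init sets ChunkDbPath to <Path>/chunks; that path and the base
        // key are what localstore.New needs (the Options pointer may be nil).
        lstore, err := localstore.New(filepath.Join(dataDir, "chunks"), baseKey, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer lstore.Close()
    }
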
diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go
index a55da6f7b..82f29f8ea 100644
--- a/swarm/api/config_test.go
+++ b/swarm/api/config_test.go
@@ -41,7 +41,6 @@ func TestConfig(t *testing.T) {
 	one := NewConfig()
 	two := NewConfig()
 
-	one.LocalStoreParams = two.LocalStoreParams
 	if equal := reflect.DeepEqual(one, two); !equal {
 		t.Fatal("Two default configs are not equal")
 	}
diff --git a/swarm/api/http/test_server.go b/swarm/api/http/test_server.go
index 97fdf0d8a..928a6e972 100644
--- a/swarm/api/http/test_server.go
+++ b/swarm/api/http/test_server.go
@@ -26,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/api"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 type TestServer interface {
@@ -37,16 +38,12 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	storeParams := storage.NewDefaultLocalStoreParams()
-	storeParams.DbCapacity = 5000000
-	storeParams.CacheCapacity = 5000
-	storeParams.Init(swarmDir)
-	localStore, err := storage.NewLocalStore(storeParams, nil)
+	localStore, err := localstore.New(swarmDir, make([]byte, 32), nil)
 	if err != nil {
 		os.RemoveAll(swarmDir)
 		t.Fatal(err)
 	}
+
 	fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
 	// Swarm feeds test setup
 	feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
diff --git a/swarm/api/inspector.go b/swarm/api/inspector.go
index ea3c4c049..2ae6b4da8 100644
--- a/swarm/api/inspector.go
+++ b/swarm/api/inspector.go
@@ -60,7 +60,11 @@ func (inspector *Inspector) Has(chunkAddresses []storage.Address) []HasInfo {
 	for _, addr := range chunkAddresses {
 		res := HasInfo{}
 		res.Addr = addr.String()
-		res.Has = inspector.netStore.Has(context.Background(), addr)
+		has, err := inspector.netStore.Has(context.Background(), addr)
+		if err != nil {
+			has = false
+		}
+		res.Has = has
 		results = append(results, res)
 	}
 	return results
diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go
index 890ed88bd..d753b3f2e 100644
--- a/swarm/api/manifest.go
+++ b/swarm/api/manifest.go
@@ -235,7 +235,6 @@ func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storag
 }
 
 func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
-
 	// TODO check size for oversized manifests
 	size, err := mr.Size(mr.Context(), quitC)
 	if err != nil { // size == 0
diff --git a/swarm/chunk/chunk.go b/swarm/chunk/chunk.go
index 7540af8ce..c8551814c 100644
--- a/swarm/chunk/chunk.go
+++ b/swarm/chunk/chunk.go
@@ -1,6 +1,7 @@
 package chunk
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -28,7 +29,7 @@ type chunk struct {
 	sdata []byte
 }
 
-func NewChunk(addr Address, data []byte) *chunk {
+func NewChunk(addr Address, data []byte) Chunk {
 	return &chunk{
 		addr:  addr,
 		sdata: data,
@@ -107,3 +108,105 @@ func Proximity(one, other []byte) (ret int) {
 	}
 	return MaxPO
 }
+
+// ModeGet enumerates different Getter modes.
+type ModeGet int
+
+// Getter modes.
+const (
+	// ModeGetRequest: when accessed for retrieval
+	ModeGetRequest ModeGet = iota
+	// ModeGetSync: when accessed for syncing or proof of custody request
+	ModeGetSync
+	// ModeGetLookup: when accessed to look up a chunk in feeds or other places
+	ModeGetLookup
+)
+
+// ModePut enumerates different Putter modes.
+type ModePut int
+
+// Putter modes.
+const (
+	// ModePutRequest: when a chunk is received as a result of a retrieve request and delivery
+	ModePutRequest ModePut = iota
+	// ModePutSync: when a chunk is received via syncing
+	ModePutSync
+	// ModePutUpload: when a chunk is created by local upload
+	ModePutUpload
+)
+
+// ModeSet enumerates different Setter modes.
+type ModeSet int
+
+// Setter modes.
+const (
+	// ModeSetAccess: when an update request is received for a chunk, or a chunk is retrieved for delivery
+	ModeSetAccess ModeSet = iota
+	// ModeSetSync: when push sync receipt is received
+	ModeSetSync
+	// ModeSetRemove: when a chunk is removed
+	ModeSetRemove
+)
+
+// Descriptor holds information required for Pull syncing. This struct
+// is provided by subscribing to pull index.
+type Descriptor struct {
+	Address Address
+	BinID   uint64
+}
+
+func (d *Descriptor) String() string {
+	if d == nil {
+		return ""
+	}
+	return fmt.Sprintf("%s bin id %v", d.Address.Hex(), d.BinID)
+}
+
+type Store interface {
+	Get(ctx context.Context, mode ModeGet, addr Address) (ch Chunk, err error)
+	Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error)
+	Has(ctx context.Context, addr Address) (yes bool, err error)
+	Set(ctx context.Context, mode ModeSet, addr Address) (err error)
+	LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
+	SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, stop func())
+	Close() (err error)
+}
+
+// FetchStore is a Store which supports syncing
+type FetchStore interface {
+	Store
+	FetchFunc(ctx context.Context, addr Address) func(context.Context) error
+}
+
+// Validator validates a chunk.
+type Validator interface {
+	Validate(ch Chunk) bool
+}
+
+// ValidatorStore encapsulates a Store by decorating its Put method
+// with a validators check.
+type ValidatorStore struct {
+	Store
+	validators []Validator
+}
+
+// NewValidatorStore returns a new ValidatorStore which uses
+// provided validators to validate chunks on Put.
+func NewValidatorStore(store Store, validators ...Validator) (s *ValidatorStore) {
+	return &ValidatorStore{
+		Store:      store,
+		validators: validators,
+	}
+}
+
+// Put overrides the Store Put method with a validators check. If one of the validators
+// returns true, the chunk is considered valid and the underlying Store Put method is called.
+// If all validators return false, ErrChunkInvalid is returned.
+func (s *ValidatorStore) Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error) {
+	for _, v := range s.validators {
+		if v.Validate(ch) {
+			return s.Store.Put(ctx, mode, ch)
+		}
+	}
+	return false, ErrChunkInvalid
+}
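
To make the new Put/validator contract concrete, here is a small usage sketch that is not part of this change: nonEmptyValidator is a made-up Validator, and db stands for any chunk.Store implementation, such as the localstore.DB this patch wires in elsewhere.

    package example

    import (
        "context"

        "github.com/ethereum/go-ethereum/swarm/chunk"
    )

    // nonEmptyValidator is a hypothetical Validator that accepts only chunks with data.
    type nonEmptyValidator struct{}

    func (nonEmptyValidator) Validate(ch chunk.Chunk) bool { return len(ch.Data()) > 0 }

    // putValidated wraps db so that every Put goes through the validator first;
    // an empty chunk is rejected with ErrChunkInvalid before reaching db.
    func putValidated(ctx context.Context, db chunk.Store, addr chunk.Address, data []byte) (exists bool, err error) {
        store := chunk.NewValidatorStore(db, nonEmptyValidator{})
        return store.Put(ctx, chunk.ModePutUpload, chunk.NewChunk(addr, data))
    }
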
diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go
index 917c440d2..1b2812f4f 100644
--- a/swarm/network/stream/common_test.go
+++ b/swarm/network/stream/common_test.go
@@ -30,16 +30,19 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
 	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
 	"github.com/ethereum/go-ethereum/swarm/storage"
-	mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
+	"github.com/ethereum/go-ethereum/swarm/storage/mock"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 	colorable "github.com/mattn/go-colorable"
 )
@@ -51,7 +54,6 @@ var (
 	useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
 	longrunning  = flag.Bool("longrunning", false, "do run long-running tests")
 
-	bucketKeyDB        = simulation.BucketKey("db")
 	bucketKeyStore     = simulation.BucketKey("store")
 	bucketKeyFileStore = simulation.BucketKey("filestore")
 	bucketKeyNetStore  = simulation.BucketKey("netstore")
@@ -113,16 +115,15 @@ func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket
 func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
 	n := ctx.Config.Node()
 
-	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
-	if *useMockStore {
-		store, datadir, err = createMockStore(mockmem.NewGlobalStore(), n.ID(), addr)
-	}
+	localStore, localStoreCleanup, err := newTestLocalStore(n.ID(), addr, nil)
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	localStore := store.(*storage.LocalStore)
+
 	netStore, err := storage.NewNetStore(localStore, nil)
 	if err != nil {
+		localStore.Close()
+		localStoreCleanup()
 		return nil, nil, nil, err
 	}
 
@@ -131,8 +132,7 @@ func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map,
 	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
 	delivery := NewDelivery(kad, netStore)
 
-	bucket.Store(bucketKeyStore, store)
-	bucket.Store(bucketKeyDB, netStore)
+	bucket.Store(bucketKeyStore, localStore)
 	bucket.Store(bucketKeyDelivery, delivery)
 	bucket.Store(bucketKeyFileStore, fileStore)
 	// for the kademlia object, we use the global key from the simulation package,
@@ -141,13 +141,13 @@ func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map,
 
 	cleanup := func() {
 		netStore.Close()
-		os.RemoveAll(datadir)
+		localStoreCleanup()
 	}
 
 	return netStore, delivery, cleanup, nil
 }
 
-func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
+func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *localstore.DB, func(), error) {
 	// setup
 	addr := network.RandomAddr() // tested peers peer address
 	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
@@ -161,11 +161,7 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
 		os.RemoveAll(datadir)
 	}
 
-	params := storage.NewDefaultLocalStoreParams()
-	params.Init(datadir)
-	params.BaseKey = addr.Over()
-
-	localStore, err := storage.NewTestLocalStoreForAddr(params)
+	localStore, err := localstore.New(datadir, addr.Over(), nil)
 	if err != nil {
 		removeDataDir()
 		return nil, nil, nil, nil, err
@@ -173,15 +169,19 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
 
 	netStore, err := storage.NewNetStore(localStore, nil)
 	if err != nil {
+		localStore.Close()
 		removeDataDir()
 		return nil, nil, nil, nil, err
 	}
 
 	delivery := NewDelivery(to, netStore)
 	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
-	streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions, nil)
+	intervalsStore := state.NewInmemoryStore()
+	streamer := NewRegistry(addr.ID(), delivery, netStore, intervalsStore, registryOptions, nil)
 	teardown := func() {
 		streamer.Close()
+		intervalsStore.Close()
+		netStore.Close()
 		removeDataDir()
 	}
 	prvkey, err := crypto.GenerateKey()
@@ -228,24 +228,37 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
 }
 
 // not used in this context, only to fulfill ChunkStore interface
-func (rrs *roundRobinStore) Has(ctx context.Context, addr storage.Address) bool {
-	panic("RoundRobinStor doesn't support HasChunk")
+func (rrs *roundRobinStore) Has(_ context.Context, _ storage.Address) (bool, error) {
+	return false, errors.New("roundRobinStore doesn't support Has")
 }
 
-func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
-	return nil, errors.New("get not well defined on round robin store")
+func (rrs *roundRobinStore) Get(_ context.Context, _ chunk.ModeGet, _ storage.Address) (storage.Chunk, error) {
+	return nil, errors.New("roundRobinStore doesn't support Get")
 }
 
-func (rrs *roundRobinStore) Put(ctx context.Context, chunk storage.Chunk) error {
+func (rrs *roundRobinStore) Put(ctx context.Context, mode chunk.ModePut, ch storage.Chunk) (bool, error) {
 	i := atomic.AddUint32(&rrs.index, 1)
 	idx := int(i) % len(rrs.stores)
-	return rrs.stores[idx].Put(ctx, chunk)
+	return rrs.stores[idx].Put(ctx, mode, ch)
+}
+
+func (rrs *roundRobinStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+	return errors.New("roundRobinStore doesn't support Set")
+}
+
+func (rrs *roundRobinStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+	return 0, errors.New("roundRobinStore doesn't support LastPullSubscriptionBinID")
 }
 
-func (rrs *roundRobinStore) Close() {
+func (rrs *roundRobinStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+	return nil, nil
+}
+
+func (rrs *roundRobinStore) Close() error {
 	for _, store := range rrs.stores {
 		store.Close()
 	}
+	return nil
 }
 
 func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
@@ -311,24 +324,28 @@ func generateRandomFile() (string, error) {
 	return string(b), nil
 }
 
-//create a local store for the given node
-func createTestLocalStorageForID(id enode.ID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
-	var datadir string
-	var err error
-	datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
+func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
+	dir, err := ioutil.TempDir("", "swarm-stream-")
 	if err != nil {
-		return nil, "", err
+		return nil, nil, err
+	}
+	cleanup = func() {
+		os.RemoveAll(dir)
 	}
-	var store storage.ChunkStore
-	params := storage.NewDefaultLocalStoreParams()
-	params.ChunkDbPath = datadir
-	params.BaseKey = addr.Over()
-	store, err = storage.NewTestLocalStoreForAddr(params)
+
+	var mockStore *mock.NodeStore
+	if globalStore != nil {
+		mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
+	}
+
+	localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
+		MockStore: mockStore,
+	})
 	if err != nil {
-		os.RemoveAll(datadir)
-		return nil, "", err
+		cleanup()
+		return nil, nil, err
 	}
-	return store, datadir, nil
+	return localStore, cleanup, nil
 }
 
 // watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index bc4f1f665..059666723 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -47,12 +48,12 @@ var (
 )
 
 type Delivery struct {
-	chunkStore storage.SyncChunkStore
+	chunkStore chunk.FetchStore
 	kad        *network.Kademlia
 	getPeer    func(enode.ID) *Peer
 }
 
-func NewDelivery(kad *network.Kademlia, chunkStore storage.SyncChunkStore) *Delivery {
+func NewDelivery(kad *network.Kademlia, chunkStore chunk.FetchStore) *Delivery {
 	return &Delivery{
 		chunkStore: chunkStore,
 		kad:        kad,
@@ -122,13 +123,13 @@ func (s *SwarmChunkServer) Close() {
 	close(s.quit)
 }
 
-// GetData retrives chunk data from db store
+// GetData retrieves chunk data from db store
 func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
-	chunk, err := s.chunkStore.Get(ctx, storage.Address(key))
+	ch, err := s.chunkStore.Get(ctx, chunk.ModeGetRequest, storage.Address(key))
 	if err != nil {
 		return nil, err
 	}
-	return chunk.Data(), nil
+	return ch.Data(), nil
 }
 
 // RetrieveRequestMsg is the protocol msg for chunk retrieve requests
@@ -171,7 +172,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
 
 	go func() {
 		defer osp.Finish()
-		chunk, err := d.chunkStore.Get(ctx, req.Addr)
+		ch, err := d.chunkStore.Get(ctx, chunk.ModeGetRequest, req.Addr)
 		if err != nil {
 			retrieveChunkFail.Inc(1)
 			log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
@@ -181,7 +182,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
 			syncing := false
 			osp.LogFields(olog.Bool("skipCheck", true))
 
-			err = sp.Deliver(ctx, chunk, s.priority, syncing)
+			err = sp.Deliver(ctx, ch, s.priority, syncing)
 			if err != nil {
 				log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
 			}
@@ -190,7 +191,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
 		}
 		osp.LogFields(olog.Bool("skipCheck", false))
 		select {
-		case streamer.deliveryC <- chunk.Address()[:]:
+		case streamer.deliveryC <- ch.Address()[:]:
 		case <-streamer.quit:
 		}
 
@@ -216,7 +217,7 @@ type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg
 type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
 
 // chunk delivery msg is response to retrieverequest msg
-func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
+func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req interface{}) error {
 	var osp opentracing.Span
 	ctx, osp = spancontext.StartSpan(
 		ctx,
@@ -224,11 +225,32 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
 
 	processReceivedChunksCount.Inc(1)
 
+	var msg *ChunkDeliveryMsg
+	var mode chunk.ModePut
+	switch r := req.(type) {
+	case *ChunkDeliveryMsgRetrieval:
+		msg = (*ChunkDeliveryMsg)(r)
+		peerPO := chunk.Proximity(sp.ID().Bytes(), msg.Addr)
+		po := chunk.Proximity(d.kad.BaseAddr(), msg.Addr)
+		depth := d.kad.NeighbourhoodDepth()
+		// chunks within the area of responsibility should always sync
+		// https://github.com/ethersphere/go-ethereum/pull/1282#discussion_r269406125
+		if po >= depth || peerPO < po {
+			mode = chunk.ModePutSync
+		} else {
+			// do not sync if the peer that is sending us a chunk is closer to the chunk than we are
+			mode = chunk.ModePutRequest
+		}
+	case *ChunkDeliveryMsgSyncing:
+		msg = (*ChunkDeliveryMsg)(r)
+		mode = chunk.ModePutSync
+	}
+
 	// retrieve the span for the originating retrieverequest
-	spanId := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), req.Addr)
-	span := tracing.ShiftSpanByKey(spanId)
+	spanID := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), msg.Addr)
+	span := tracing.ShiftSpanByKey(spanID)
 
-	log.Trace("handle.chunk.delivery", "ref", req.Addr, "from peer", sp.ID())
+	log.Trace("handle.chunk.delivery", "ref", msg.Addr, "from peer", sp.ID())
 
 	go func() {
 		defer osp.Finish()
@@ -238,18 +260,18 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
 			defer span.Finish()
 		}
 
-		req.peer = sp
-		log.Trace("handle.chunk.delivery", "put", req.Addr)
-		err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
+		msg.peer = sp
+		log.Trace("handle.chunk.delivery", "put", msg.Addr)
+		_, err := d.chunkStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
 		if err != nil {
 			if err == storage.ErrChunkInvalid {
 				// we removed this log because it spams the logs
 				// TODO: Enable this log line
-				// log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", req.Addr, )
-				req.peer.Drop(err)
+				// log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", msg.Addr, )
+				msg.peer.Drop(err)
 			}
 		}
-		log.Trace("handle.chunk.delivery", "done put", req.Addr, "err", err)
+		log.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err)
 	}()
 	return nil
 }
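
The mode selection added above boils down to a pure function of three addresses and the neighbourhood depth. The sketch below restates it outside the handler for readability; putModeForDelivery is a hypothetical name, and baseAddr, peerAddr and depth stand in for d.kad.BaseAddr(), sp.ID().Bytes() and d.kad.NeighbourhoodDepth().

    // putModeForDelivery mirrors handleChunkDeliveryMsg: a delivered chunk is stored
    // with ModePutSync when it falls within our area of responsibility or when the
    // sending peer is farther from the chunk than we are; otherwise it is stored
    // with ModePutRequest.
    func putModeForDelivery(baseAddr, peerAddr, chunkAddr []byte, depth int) chunk.ModePut {
        peerPO := chunk.Proximity(peerAddr, chunkAddr)
        po := chunk.Proximity(baseAddr, chunkAddr)
        if po >= depth || peerPO < po {
            return chunk.ModePutSync
        }
        return chunk.ModePutRequest
    }
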
diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go
index 50b788150..801e6d98a 100644
--- a/swarm/network/stream/delivery_test.go
+++ b/swarm/network/stream/delivery_test.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/protocols"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
 	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
@@ -189,8 +190,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
 	})
 
 	hash := storage.Address(hash0[:])
-	chunk := storage.NewChunk(hash, hash)
-	err = localStore.Put(context.TODO(), chunk)
+	ch := storage.NewChunk(hash, hash)
+	_, err = localStore.Put(context.TODO(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
@@ -241,8 +242,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
 	}
 
 	hash = storage.Address(hash1[:])
-	chunk = storage.NewChunk(hash, hash1[:])
-	err = localStore.Put(context.TODO(), chunk)
+	ch = storage.NewChunk(hash, hash1[:])
+	_, err = localStore.Put(context.TODO(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
@@ -420,14 +421,14 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
 	defer cancel()
 
 	// wait for the chunk to get stored
-	storedChunk, err := localStore.Get(ctx, chunkKey)
+	storedChunk, err := localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
 	for err != nil {
 		select {
 		case <-ctx.Done():
 			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
 		default:
 		}
-		storedChunk, err = localStore.Get(ctx, chunkKey)
+		storedChunk, err = localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
 		time.Sleep(50 * time.Millisecond)
 	}
 
@@ -700,7 +701,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
 			errs := make(chan error)
 			for _, hash := range hashes {
 				go func(h storage.Address) {
-					_, err := netStore.Get(ctx, h)
+					_, err := netStore.Get(ctx, chunk.ModeGetRequest, h)
 					log.Warn("test check netstore get", "hash", h, "err", err)
 					errs <- err
 				}(hash)
diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go
index 009a941ef..1f2cdcada 100644
--- a/swarm/network/stream/intervals_test.go
+++ b/swarm/network/stream/intervals_test.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
 	"github.com/ethereum/go-ethereum/swarm/storage"
@@ -287,11 +288,11 @@ func enableNotifications(r *Registry, peerID enode.ID, s Stream) error {
 
 type testExternalClient struct {
 	hashes               chan []byte
-	store                storage.SyncChunkStore
+	store                chunk.FetchStore
 	enableNotificationsC chan struct{}
 }
 
-func newTestExternalClient(store storage.SyncChunkStore) *testExternalClient {
+func newTestExternalClient(store chunk.FetchStore) *testExternalClient {
 	return &testExternalClient{
 		hashes:               make(chan []byte),
 		store:                store,
diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go
index 2957999f8..2d5935276 100644
--- a/swarm/network/stream/snapshot_retrieval_test.go
+++ b/swarm/network/stream/snapshot_retrieval_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
@@ -278,8 +279,8 @@ func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
 		if !ok {
 			return fmt.Errorf("No localstore")
 		}
-		lstore := item.(*storage.LocalStore)
-		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+		store := item.(chunk.Store)
+		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
 		if err != nil {
 			return err
 		}
diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go
index ce1e69db2..605c9dbeb 100644
--- a/swarm/network/stream/snapshot_sync_test.go
+++ b/swarm/network/stream/snapshot_sync_test.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/pot"
@@ -190,10 +191,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 		node := sim.Net.GetRandomUpNode()
 		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
 		if !ok {
-			return fmt.Errorf("No localstore")
+			return errors.New("no store in simulation bucket")
 		}
-		lstore := item.(*storage.LocalStore)
-		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+		store := item.(chunk.Store)
+		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
 		if err != nil {
 			return err
 		}
@@ -221,25 +222,25 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 				localChunks := conf.idToChunksMap[id]
 				for _, ch := range localChunks {
 					//get the real chunk by the index in the index array
-					chunk := conf.hashes[ch]
-					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
+					ch := conf.hashes[ch]
+					log.Trace("node has chunk", "address", ch)
 					//check if the expected chunk is indeed in the localstore
 					var err error
 					if *useMockStore {
 						//use the globalStore if the mockStore should be used; in that case,
 						//the complete localStore stack is bypassed for getting the chunk
-						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
+						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), ch)
 					} else {
 						//use the actual localstore
 						item, ok := sim.NodeItem(id, bucketKeyStore)
 						if !ok {
-							return fmt.Errorf("Error accessing localstore")
+							return errors.New("no store in simulation bucket")
 						}
-						lstore := item.(*storage.LocalStore)
-						_, err = lstore.Get(ctx, chunk)
+						store := item.(chunk.Store)
+						_, err = store.Get(ctx, chunk.ModeGetLookup, ch)
 					}
 					if err != nil {
-						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
+						log.Debug("chunk not found", "address", ch.Hex(), "node", id)
 						// Do not get crazy with logging the warn message
 						time.Sleep(500 * time.Millisecond)
 						continue REPEAT
@@ -247,10 +248,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 					evt := &simulations.Event{
 						Type: EventTypeChunkArrived,
 						Node: sim.Net.GetNode(id),
-						Data: chunk.String(),
+						Data: ch.String(),
 					}
 					sim.Net.Events().Send(evt)
-					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
+					log.Trace("chunk found", "address", ch.Hex(), "node", id)
 				}
 			}
 			return nil
@@ -296,9 +297,9 @@ func mapKeysToNodes(conf *synctestConfig) {
 }
 
 //upload a file(chunks) to a single local node store
-func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
+func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, store chunk.Store) ([]storage.Address, error) {
 	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
-	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
+	fileStore := storage.NewFileStore(store, storage.NewFileStoreParams())
 	size := chunkSize
 	var rootAddrs []storage.Address
 	for i := 0; i < chunkCount; i++ {
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 1038e52d0..0d990da5c 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -30,11 +30,11 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/protocols"
 	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
 	"github.com/ethereum/go-ethereum/swarm/state"
-	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
 const (
@@ -108,7 +108,7 @@ type RegistryOptions struct {
 }
 
 // NewRegistry is Streamer constructor
-func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
+func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore chunk.FetchStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
 	if options == nil {
 		options = &RegistryOptions{}
 	}
@@ -627,13 +627,8 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
 	case *WantedHashesMsg:
 		return p.handleWantedHashesMsg(ctx, msg)
 
-	case *ChunkDeliveryMsgRetrieval:
-		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
-		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
-
-	case *ChunkDeliveryMsgSyncing:
-		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
-		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+	case *ChunkDeliveryMsgRetrieval, *ChunkDeliveryMsgSyncing:
+		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg)
 
 	case *RetrieveRequestMsg:
 		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
diff --git a/swarm/network/stream/syncer.go b/swarm/network/stream/syncer.go
index 5f03dcff7..c573da5d2 100644
--- a/swarm/network/stream/syncer.go
+++ b/swarm/network/stream/syncer.go
@@ -21,8 +21,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -36,12 +35,12 @@ const (
 // * (live/non-live historical) chunk syncing per proximity bin
 type SwarmSyncerServer struct {
 	po    uint8
-	store storage.SyncChunkStore
+	store chunk.FetchStore
 	quit  chan struct{}
 }
 
 // NewSwarmSyncerServer is constructor for SwarmSyncerServer
-func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
+func NewSwarmSyncerServer(po uint8, syncChunkStore chunk.FetchStore) (*SwarmSyncerServer, error) {
 	return &SwarmSyncerServer{
 		po:    po,
 		store: syncChunkStore,
@@ -49,7 +48,7 @@ func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*Swa
 	}, nil
 }
 
-func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
+func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore chunk.FetchStore) {
 	streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
 		po, err := ParseSyncBinKey(t)
 		if err != nil {
@@ -69,76 +68,103 @@ func (s *SwarmSyncerServer) Close() {
 
 // GetData retrieves the actual chunk from netstore
 func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
-	chunk, err := s.store.Get(ctx, storage.Address(key))
+	ch, err := s.store.Get(ctx, chunk.ModeGetSync, storage.Address(key))
 	if err != nil {
 		return nil, err
 	}
-	return chunk.Data(), nil
+	return ch.Data(), nil
 }
 
 // SessionIndex returns current storage bin (po) index.
 func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
-	return s.store.BinIndex(s.po), nil
+	return s.store.LastPullSubscriptionBinID(s.po)
 }
 
-// GetBatch retrieves the next batch of hashes from the dbstore
+// SetNextBatch retrieves the next batch of hashes from the localstore.
+// It expects a range of bin IDs, both ends inclusive in syncing, and returns
+// a concatenated byte slice of chunk addresses together with the bin IDs of
+// the first and the last address in that slice. The batch may contain up to
+// BatchSize chunk addresses. If at least one chunk has been added to the batch
+// and no new chunks arrive within the batchTimeout period, the batch is returned.
+// This function blocks until new chunks are received from the localstore pull subscription.
 func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
-	var batch []byte
-	i := 0
-
-	var ticker *time.Ticker
+	descriptors, stop := s.store.SubscribePull(context.Background(), s.po, from, to)
+	defer stop()
+
+	const batchTimeout = 2 * time.Second
+
+	var (
+		batch        []byte
+		batchSize    int
+		batchStartID *uint64
+		batchEndID   uint64
+		timer        *time.Timer
+		timerC       <-chan time.Time
+	)
 	defer func() {
-		if ticker != nil {
-			ticker.Stop()
+		if timer != nil {
+			timer.Stop()
 		}
 	}()
-	var wait bool
-	for {
-		if wait {
-			if ticker == nil {
-				ticker = time.NewTicker(1000 * time.Millisecond)
+
+	for iterate := true; iterate; {
+		select {
+		case d, ok := <-descriptors:
+			if !ok {
+				iterate = false
+				break
 			}
-			select {
-			case <-ticker.C:
-			case <-s.quit:
-				return nil, 0, 0, nil, nil
+			batch = append(batch, d.Address[:]...)
+			// This is the most naive approach to label the chunk as synced
+			// allowing it to be garbage collected. A proper way requires
+			// validating that the chunk is successfully stored by the peer.
+			err := s.store.Set(context.Background(), chunk.ModeSetSync, d.Address)
+			if err != nil {
+				return nil, 0, 0, nil, err
 			}
-		}
-
-		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
-		err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
-			select {
-			case <-s.quit:
-				return false
-			default:
+			batchSize++
+			if batchStartID == nil {
+				// set batch start id only if
+				// this is the first iteration
+				batchStartID = &d.BinID
 			}
-			batch = append(batch, key[:]...)
-			i++
-			to = idx
-			return i < BatchSize
-		})
-		if err != nil {
-			return nil, 0, 0, nil, err
-		}
-		if len(batch) > 0 {
-			break
+			batchEndID = d.BinID
+			if batchSize >= BatchSize {
+				iterate = false
+			}
+			if timer == nil {
+				timer = time.NewTimer(batchTimeout)
+			} else {
+				if !timer.Stop() {
+					<-timer.C
+				}
+				timer.Reset(batchTimeout)
+			}
+			timerC = timer.C
+		case <-timerC:
+			// return batch if new chunks are not
+			// received after some time
+			iterate = false
+		case <-s.quit:
+			iterate = false
 		}
-		wait = true
 	}
-
-	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
-	return batch, from, to, nil, nil
+	if batchStartID == nil {
+		// if batch start id is not set, return 0
+		batchStartID = new(uint64)
+	}
+	return batch, *batchStartID, batchEndID, nil, nil
 }
 
 // SwarmSyncerClient
 type SwarmSyncerClient struct {
-	store  storage.SyncChunkStore
+	store  chunk.FetchStore
 	peer   *Peer
 	stream Stream
 }
 
 // NewSwarmSyncerClient is a contructor for provable data exchange syncer
-func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
+func NewSwarmSyncerClient(p *Peer, store chunk.FetchStore, stream Stream) (*SwarmSyncerClient, error) {
 	return &SwarmSyncerClient{
 		store:  store,
 		peer:   p,
@@ -184,7 +210,7 @@ func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream)
 
 // RegisterSwarmSyncerClient registers the client constructor function for
 // to handle incoming sync streams
-func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
+func RegisterSwarmSyncerClient(streamer *Registry, store chunk.FetchStore) {
 	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
 		return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
 	})
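
One detail worth calling out from SetNextBatch is the timer handling: the batch timeout is restarted on every received descriptor, and a time.Timer must be stopped and drained before Reset if it may already have fired. A standalone sketch of that pattern; resetBatchTimer is a hypothetical helper, not part of this change:

    // resetBatchTimer restarts the batch timeout the way SetNextBatch does above.
    // It assumes the caller has not already received from t.C since the last reset,
    // which holds there because a received timeout ends the loop instead of resetting.
    func resetBatchTimer(t *time.Timer, d time.Duration) *time.Timer {
        if t == nil {
            return time.NewTimer(d)
        }
        if !t.Stop() {
            <-t.C // drain a timer that already fired so Reset starts clean
        }
        t.Reset(d)
        return t
    }
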
diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go
index 07586714e..a8651f386 100644
--- a/swarm/network/stream/syncer_test.go
+++ b/swarm/network/stream/syncer_test.go
@@ -21,22 +21,20 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"math"
 	"os"
 	"sync"
 	"testing"
 	"time"
 
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
 	"github.com/ethereum/go-ethereum/swarm/storage"
-	"github.com/ethereum/go-ethereum/swarm/storage/mock"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
@@ -55,24 +53,6 @@ func TestSyncerSimulation(t *testing.T) {
 	}
 }
 
-func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
-	address := common.BytesToAddress(id.Bytes())
-	mockStore := globalStore.NewNodeStore(address)
-	params := storage.NewDefaultLocalStoreParams()
-
-	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
-	if err != nil {
-		return nil, "", err
-	}
-	params.Init(datadir)
-	params.BaseKey = addr.Over()
-	lstore, err = storage.NewLocalStore(params, mockStore)
-	if err != nil {
-		return nil, "", err
-	}
-	return lstore, datadir, nil
-}
-
 func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
 
 	sim := simulation.New(map[string]simulation.ServiceFunc{
@@ -181,17 +161,32 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
 			if i < nodes-1 {
 				hashCounts[i] = hashCounts[i+1]
 			}
-			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
+			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyStore)
 			if !ok {
 				return fmt.Errorf("No DB")
 			}
-			netStore := item.(*storage.NetStore)
-			netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
-				hashes[i] = append(hashes[i], addr)
-				totalHashes++
-				hashCounts[i]++
-				return true
-			})
+			store := item.(chunk.Store)
+			until, err := store.LastPullSubscriptionBinID(po)
+			if err != nil {
+				return err
+			}
+			if until > 0 {
+				c, _ := store.SubscribePull(ctx, po, 0, until)
+				for iterate := true; iterate; {
+					select {
+					case cd, ok := <-c:
+						if !ok {
+							iterate = false
+							break
+						}
+						hashes[i] = append(hashes[i], cd.Address)
+						totalHashes++
+						hashCounts[i]++
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+				}
+			}
 		}
 		var total, found int
 		for _, node := range nodeIDs {
@@ -200,12 +195,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
 			for j := i; j < nodes; j++ {
 				total += len(hashes[j])
 				for _, key := range hashes[j] {
-					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
+					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyStore)
 					if !ok {
 						return fmt.Errorf("No DB")
 					}
-					db := item.(*storage.NetStore)
-					_, err := db.Get(ctx, key)
+					db := item.(chunk.Store)
+					_, err := db.Get(ctx, chunk.ModeGetRequest, key)
 					if err == nil {
 						found++
 					}
@@ -216,7 +211,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
 		if total == found && total > 0 {
 			return nil
 		}
-		return fmt.Errorf("Total not equallying found: total is %d", total)
+		return fmt.Errorf("Total not equalling found %v: total is %d", found, total)
 	})
 
 	if result.Error != nil {
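
The collection loop above is the general pattern for enumerating a bin with the new API: LastPullSubscriptionBinID gives the upper bound, and SubscribePull streams descriptors up to that bound, closing the channel when done. A compact helper-style sketch, with binAddresses as a hypothetical name:

    // binAddresses gathers every chunk address currently stored for proximity
    // order po by subscribing from bin ID 0 up to the last known bin ID.
    func binAddresses(ctx context.Context, store chunk.Store, po uint8) ([]chunk.Address, error) {
        until, err := store.LastPullSubscriptionBinID(po)
        if err != nil || until == 0 {
            return nil, err
        }
        descriptors, stop := store.SubscribePull(ctx, po, 0, until)
        defer stop()
        var addrs []chunk.Address
        for {
            select {
            case d, ok := <-descriptors:
                if !ok {
                    return addrs, nil // channel closed: everything up to until was delivered
                }
                addrs = append(addrs, d.Address)
            case <-ctx.Done():
                return addrs, ctx.Err()
            }
        }
    }
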
diff --git a/swarm/shed/index.go b/swarm/shed/index.go
index 6be018d20..38afbce4c 100644
--- a/swarm/shed/index.go
+++ b/swarm/shed/index.go
@@ -40,9 +40,7 @@ type Item struct {
 	Data            []byte
 	AccessTimestamp int64
 	StoreTimestamp  int64
-	// UseMockStore is a pointer to identify
-	// an unset state of the field in Join function.
-	UseMockStore *bool
+	BinID           uint64
 }
 
 // Merge is a helper method to construct a new
@@ -61,8 +59,8 @@ func (i Item) Merge(i2 Item) (new Item) {
 	if i.StoreTimestamp == 0 {
 		i.StoreTimestamp = i2.StoreTimestamp
 	}
-	if i.UseMockStore == nil {
-		i.UseMockStore = i2.UseMockStore
+	if i.BinID == 0 {
+		i.BinID = i2.BinID
 	}
 	return i
 }
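
As with the timestamps, Merge treats the zero value of BinID as unset and fills it from the other item. A tiny, made-up illustration:

    import "github.com/ethereum/go-ethereum/swarm/shed"

    func mergeExample() shed.Item {
        a := shed.Item{StoreTimestamp: 5}            // BinID left at zero, i.e. unset
        b := shed.Item{BinID: 42, StoreTimestamp: 7}
        merged := a.Merge(b)
        // merged.BinID == 42         (filled from b because a's BinID was unset)
        // merged.StoreTimestamp == 5 (kept from a because it was already set)
        return merged
    }
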
diff --git a/swarm/shed/schema.go b/swarm/shed/schema.go
index cfb7c6d64..557d951fb 100644
--- a/swarm/shed/schema.go
+++ b/swarm/shed/schema.go
@@ -52,7 +52,7 @@ type indexSpec struct {
 	Name string `json:"name"`
 }
 
-// schemaFieldKey retrives the complete LevelDB key for
+// schemaFieldKey retrieves the complete LevelDB key for
 // a particular field form the schema definition.
 func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
 	if name == "" {
diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go
index c4d187b62..100e778a3 100644
--- a/swarm/storage/common_test.go
+++ b/swarm/storage/common_test.go
@@ -22,8 +22,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
-	"os"
 	"sync"
 	"testing"
 	"time"
@@ -59,30 +57,6 @@ func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader
 	}
 }
 
-func newLDBStore(t *testing.T) (*LDBStore, func()) {
-	dir, err := ioutil.TempDir("", "bzz-storage-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	log.Trace("memstore.tempdir", "dir", dir)
-
-	ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
-	db, err := NewLDBStore(ldbparams)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cleanup := func() {
-		db.Close()
-		err := os.RemoveAll(dir)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	return db, cleanup
-}
-
 func mputRandomChunks(store ChunkStore, n int) ([]Chunk, error) {
 	return mput(store, n, GenerateRandomChunk)
 }
@@ -94,14 +68,15 @@ func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
 	for i := int64(0); i < int64(n); i++ {
-		chunk := f(chunk.DefaultSize)
+		ch := f(chunk.DefaultSize)
 		go func() {
+			_, err := store.Put(ctx, chunk.ModePutUpload, ch)
 			select {
-			case errc <- store.Put(ctx, chunk):
+			case errc <- err:
 			case <-ctx.Done():
 			}
 		}()
-		hs = append(hs, chunk)
+		hs = append(hs, ch)
 	}
 
 	// wait for all chunks to be stored
@@ -123,13 +98,13 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error)
 		go func(h Address) {
 			defer wg.Done()
 			// TODO: write timeout with context
-			chunk, err := store.Get(context.TODO(), h)
+			ch, err := store.Get(context.TODO(), chunk.ModeGetRequest, h)
 			if err != nil {
 				errc <- err
 				return
 			}
 			if f != nil {
-				err = f(h, chunk)
+				err = f(h, ch)
 				if err != nil {
 					errc <- err
 					return
@@ -250,14 +225,15 @@ func NewMapChunkStore() *MapChunkStore {
 	}
 }
 
-func (m *MapChunkStore) Put(_ context.Context, ch Chunk) error {
+func (m *MapChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
+	_, exists := m.chunks[ch.Address().Hex()]
 	m.chunks[ch.Address().Hex()] = ch
-	return nil
+	return exists, nil
 }
 
-func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
+func (m *MapChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 	chunk := m.chunks[ref.Hex()]
@@ -268,15 +244,28 @@ func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
 }
 
 // Need to implement Has from SyncChunkStore
-func (m *MapChunkStore) Has(ctx context.Context, ref Address) bool {
+func (m *MapChunkStore) Has(ctx context.Context, ref Address) (has bool, err error) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 
-	_, has := m.chunks[ref.Hex()]
-	return has
+	_, has = m.chunks[ref.Hex()]
+	return has, nil
+}
+
+func (m *MapChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+	return nil
+}
+
+func (m *MapChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+	return 0, nil
 }
 
-func (m *MapChunkStore) Close() {
+func (m *MapChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+	return nil, nil
+}
+
+func (m *MapChunkStore) Close() error {
+	return nil
 }
 
 func chunkAddresses(chunks []Chunk) []Address {
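
The test helpers above now exercise the mode-aware store API: Put takes a chunk.ModePut and reports whether the chunk was already stored, Get takes a chunk.ModeGet, and Has returns an error alongside the boolean. A condensed usage sketch inferred from these call sites (illustrative, not part of the patch; putAndGet is a hypothetical helper):

	package storage

	import (
		"context"

		"github.com/ethereum/go-ethereum/swarm/chunk"
	)

	// putAndGet mirrors the rewritten call sites above: Put takes a put
	// mode and reports whether the chunk already existed, Get takes a get
	// mode naming the reason for the read.
	func putAndGet(ctx context.Context, store ChunkStore, ch Chunk) (Chunk, error) {
		exists, err := store.Put(ctx, chunk.ModePutUpload, ch)
		if err != nil {
			return nil, err
		}
		_ = exists // true means the store already held this chunk
		return store.Get(ctx, chunk.ModeGetRequest, ch.Address())
	}
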
diff --git a/swarm/storage/database.go b/swarm/storage/database.go
deleted file mode 100644
index 12367b905..000000000
--- a/swarm/storage/database.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-// this is a clone of an earlier state of the ethereum ethdb/database
-// no need for queueing/caching
-
-import (
-	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/iterator"
-	"github.com/syndtr/goleveldb/leveldb/opt"
-)
-
-const openFileLimit = 128
-
-type LDBDatabase struct {
-	db *leveldb.DB
-}
-
-func NewLDBDatabase(file string) (*LDBDatabase, error) {
-	// Open the db
-	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: openFileLimit})
-	if err != nil {
-		return nil, err
-	}
-
-	database := &LDBDatabase{db: db}
-
-	return database, nil
-}
-
-func (db *LDBDatabase) Put(key []byte, value []byte) error {
-	metrics.GetOrRegisterCounter("ldbdatabase.put", nil).Inc(1)
-
-	return db.db.Put(key, value, nil)
-}
-
-func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
-	metrics.GetOrRegisterCounter("ldbdatabase.get", nil).Inc(1)
-
-	dat, err := db.db.Get(key, nil)
-	if err != nil {
-		return nil, err
-	}
-	return dat, nil
-}
-
-func (db *LDBDatabase) Delete(key []byte) error {
-	return db.db.Delete(key, nil)
-}
-
-func (db *LDBDatabase) NewIterator() iterator.Iterator {
-	metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
-
-	return db.db.NewIterator(nil, nil)
-}
-
-func (db *LDBDatabase) Write(batch *leveldb.Batch) error {
-	metrics.GetOrRegisterCounter("ldbdatabase.write", nil).Inc(1)
-
-	return db.db.Write(batch, nil)
-}
-
-func (db *LDBDatabase) Close() {
-	// Close the leveldb database
-	db.db.Close()
-}
diff --git a/swarm/storage/feed/handler.go b/swarm/storage/feed/handler.go
index 61124e2db..0f6f2ba34 100644
--- a/swarm/storage/feed/handler.go
+++ b/swarm/storage/feed/handler.go
@@ -24,6 +24,8 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+
 	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
 
 	"github.com/ethereum/go-ethereum/swarm/log"
@@ -189,7 +191,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
 		ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
 		defer cancel()
 
-		chunk, err := h.chunkStore.Get(ctx, id.Addr())
+		ch, err := h.chunkStore.Get(ctx, chunk.ModeGetLookup, id.Addr())
 		if err != nil {
 			if err == context.DeadlineExceeded { // chunk not found
 				return nil, nil
@@ -198,7 +200,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
 		}
 
 		var request Request
-		if err := request.fromChunk(chunk); err != nil {
+		if err := request.fromChunk(ch); err != nil {
 			return nil, nil
 		}
 		if request.Time <= timeLimit {
@@ -257,14 +259,14 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad
 		return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
 	}
 
-	chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
+	ch, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
 	if err != nil {
 		return nil, err
 	}
 
 	// send the chunk
-	h.chunkStore.Put(ctx, chunk)
-	log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data())
+	h.chunkStore.Put(ctx, chunk.ModePutUpload, ch)
+	log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", ch.Data())
 	// update our feed updates map cache entry if the new update is older than the one we have, if we have it.
 	if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) {
 		feedUpdate.Epoch = r.Epoch
diff --git a/swarm/storage/feed/handler_test.go b/swarm/storage/feed/handler_test.go
index 2f8a52453..c4f6fe689 100644
--- a/swarm/storage/feed/handler_test.go
+++ b/swarm/storage/feed/handler_test.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 var (
@@ -400,9 +401,7 @@ func TestValidatorInStore(t *testing.T) {
 	}
 	defer os.RemoveAll(datadir)
 
-	handlerParams := storage.NewDefaultLocalStoreParams()
-	handlerParams.Init(datadir)
-	store, err := storage.NewLocalStore(handlerParams, nil)
+	localstore, err := localstore.New(datadir, make([]byte, 32), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -410,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
 	// set up Swarm feeds handler and add is as a validator to the localstore
 	fhParams := &HandlerParams{}
 	fh := NewHandler(fhParams)
-	store.Validators = append(store.Validators, fh)
+	store := chunk.NewValidatorStore(localstore, fh)
 
 	// create content addressed chunks, one good, one faulty
 	chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
@@ -447,15 +446,15 @@ func TestValidatorInStore(t *testing.T) {
 	}
 
 	// put the chunks in the store and check their error status
-	err = store.Put(context.Background(), goodChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, goodChunk)
 	if err == nil {
 		t.Fatal("expected error on good content address chunk with feed update validator only, but got nil")
 	}
-	err = store.Put(context.Background(), badChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, badChunk)
 	if err == nil {
 		t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil")
 	}
-	err = store.Put(context.Background(), uglyChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, uglyChunk)
 	if err != nil {
 		t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err)
 	}
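
The former LocalStore.Validators slice is replaced here by wrapping the new localstore with chunk.NewValidatorStore. A self-contained sketch of the same wiring, using only calls that appear in this hunk (a hypothetical, skipped test; not part of the patch):

	package feed

	import (
		"context"
		"io/ioutil"
		"os"
		"testing"

		"github.com/ethereum/go-ethereum/swarm/chunk"
		"github.com/ethereum/go-ethereum/swarm/storage"
		"github.com/ethereum/go-ethereum/swarm/storage/localstore"
	)

	// TestValidatorWiringSketch condenses the setup above: a localstore
	// opened with a zero base key, wrapped so that the feed handler
	// validates every chunk on Put.
	func TestValidatorWiringSketch(t *testing.T) {
		t.Skip("illustrative sketch only")

		dir, err := ioutil.TempDir("", "feed-validator-sketch")
		if err != nil {
			t.Fatal(err)
		}
		defer os.RemoveAll(dir)

		db, err := localstore.New(dir, make([]byte, 32), nil)
		if err != nil {
			t.Fatal(err)
		}
		defer db.Close()

		fh := NewHandler(&HandlerParams{})
		store := chunk.NewValidatorStore(db, fh)

		// A random content-addressed chunk is not a feed update, so the
		// feed-update validator is expected to reject it, as in the test
		// above.
		ch := storage.GenerateRandomChunk(chunk.DefaultSize)
		if _, err := store.Put(context.Background(), chunk.ModePutUpload, ch); err == nil {
			t.Fatal("expected validation error for a non-feed chunk")
		}
	}
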
diff --git a/swarm/storage/feed/testutil.go b/swarm/storage/feed/testutil.go
index caa39d9ff..db2d989e1 100644
--- a/swarm/storage/feed/testutil.go
+++ b/swarm/storage/feed/testutil.go
@@ -18,12 +18,13 @@ package feed
 
 import (
 	"context"
-	"fmt"
 	"path/filepath"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 const (
@@ -53,14 +54,14 @@ func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetF
 func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
 	path := filepath.Join(datadir, testDbDirName)
 	fh := NewHandler(params)
-	localstoreparams := storage.NewDefaultLocalStoreParams()
-	localstoreparams.Init(path)
-	localStore, err := storage.NewLocalStore(localstoreparams, nil)
+
+	db, err := localstore.New(filepath.Join(path, "chunks"), make([]byte, 32), nil)
 	if err != nil {
-		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+		return nil, err
 	}
-	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)))
-	localStore.Validators = append(localStore.Validators, fh)
+
+	localStore := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)), fh)
+
 	netStore, err := storage.NewNetStore(localStore, nil)
 	if err != nil {
 		return nil, err
diff --git a/swarm/storage/filestore.go b/swarm/storage/filestore.go
index 0bad944ee..2b15f7da6 100644
--- a/swarm/storage/filestore.go
+++ b/swarm/storage/filestore.go
@@ -21,6 +21,9 @@ import (
 	"io"
 	"sort"
 	"sync"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 /*
@@ -58,14 +61,11 @@ func NewFileStoreParams() *FileStoreParams {
 
 // for testing locally
 func NewLocalFileStore(datadir string, basekey []byte) (*FileStore, error) {
-	params := NewDefaultLocalStoreParams()
-	params.Init(datadir)
-	localStore, err := NewLocalStore(params, nil)
+	localStore, err := localstore.New(datadir, basekey, nil)
 	if err != nil {
 		return nil, err
 	}
-	localStore.Validators = append(localStore.Validators, NewContentAddressValidator(MakeHashFunc(DefaultHash)))
-	return NewFileStore(localStore, NewFileStoreParams()), nil
+	return NewFileStore(chunk.NewValidatorStore(localStore, NewContentAddressValidator(MakeHashFunc(DefaultHash))), NewFileStoreParams()), nil
 }
 
 func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
diff --git a/swarm/storage/filestore_test.go b/swarm/storage/filestore_test.go
index 06c4be1d7..fe01eed9a 100644
--- a/swarm/storage/filestore_test.go
+++ b/swarm/storage/filestore_test.go
@@ -22,8 +22,10 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
@@ -35,21 +37,18 @@ func TestFileStorerandom(t *testing.T) {
 }
 
 func testFileStoreRandom(toEncrypt bool, t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	db.setCapacity(50000)
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
+	defer localStore.Close()
 
 	fileStore := NewFileStore(localStore, NewFileStoreParams())
-	defer os.RemoveAll("/tmp/bzz")
 
 	slice := testutil.RandomBytes(1, testDataSize)
 	ctx := context.TODO()
@@ -76,9 +75,8 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
 	if !bytes.Equal(slice, resultSlice) {
 		t.Fatalf("Comparison error.")
 	}
-	ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
-	ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
-	localStore.memStore = NewMemStore(NewDefaultStoreParams(), db)
+	ioutil.WriteFile(filepath.Join(dir, "slice.bzz.16M"), slice, 0666)
+	ioutil.WriteFile(filepath.Join(dir, "result.bzz.16M"), resultSlice, 0666)
 	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
@@ -104,17 +102,17 @@ func TestFileStoreCapacity(t *testing.T) {
 }
 
 func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
+	defer localStore.Close()
+
 	fileStore := NewFileStore(localStore, NewFileStoreParams())
 	slice := testutil.RandomBytes(1, testDataSize)
 	ctx := context.TODO()
@@ -141,10 +139,6 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 	if !bytes.Equal(slice, resultSlice) {
 		t.Fatalf("Comparison error.")
 	}
-	// Clear memStore
-	memStore.setCapacity(0)
-	// check whether it is, indeed, empty
-	fileStore.ChunkStore = memStore
 	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
@@ -177,17 +171,17 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 // TestGetAllReferences only tests that GetAllReferences returns an expected
 // number of references for a given file
 func TestGetAllReferences(t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
+	defer localStore.Close()
+
 	fileStore := NewFileStore(localStore, NewFileStoreParams())
 
 	// testRuns[i] and expectedLen[i] are dataSize and expected length respectively
diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go
index 345ce7430..2e4a1c11b 100644
--- a/swarm/storage/hasherstore.go
+++ b/swarm/storage/hasherstore.go
@@ -93,7 +93,7 @@ func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error)
 		return nil, err
 	}
 
-	chunk, err := h.store.Get(ctx, addr)
+	chunk, err := h.store.Get(ctx, chunk.ModeGetRequest, addr)
 	if err != nil {
 		return nil, err
 	}
@@ -239,11 +239,12 @@ func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryptio
 	return encryption.New(key, int(chunk.DefaultSize), 0, sha3.NewLegacyKeccak256)
 }
 
-func (h *hasherStore) storeChunk(ctx context.Context, chunk Chunk) {
+func (h *hasherStore) storeChunk(ctx context.Context, ch Chunk) {
 	atomic.AddUint64(&h.nrChunks, 1)
 	go func() {
+		_, err := h.store.Put(ctx, chunk.ModePutUpload, ch)
 		select {
-		case h.errC <- h.store.Put(ctx, chunk):
+		case h.errC <- err:
 		case <-h.quitC:
 		}
 	}()
diff --git a/swarm/storage/hasherstore_test.go b/swarm/storage/hasherstore_test.go
index 22cf98d0e..c95537db7 100644
--- a/swarm/storage/hasherstore_test.go
+++ b/swarm/storage/hasherstore_test.go
@@ -21,9 +21,9 @@ import (
 	"context"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
-
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
 )
 
 func TestHasherStore(t *testing.T) {
@@ -107,7 +107,7 @@ func TestHasherStore(t *testing.T) {
 		}
 
 		// Check if chunk data in store is encrypted or not
-		chunkInStore, err := chunkStore.Get(ctx, hash1)
+		chunkInStore, err := chunkStore.Get(ctx, chunk.ModeGetRequest, hash1)
 		if err != nil {
 			t.Fatalf("Expected no error got \"%v\"", err)
 		}
diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
deleted file mode 100644
index fd5ec9e30..000000000
--- a/swarm/storage/ldbstore.go
+++ /dev/null
@@ -1,1082 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// disk storage layer for the package bzz
-// DbStore implements the ChunkStore interface and is used by the FileStore as
-// persistent storage of chunks
-// it implements purging based on access count allowing for external control of
-// max capacity
-
-package storage
-
-import (
-	"archive/tar"
-	"bytes"
-	"context"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"sync"
-
-	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/swarm/log"
-	"github.com/ethereum/go-ethereum/swarm/storage/mock"
-	"github.com/syndtr/goleveldb/leveldb"
-)
-
-const (
-	defaultGCRatio    = 10
-	defaultMaxGCRound = 10000
-	defaultMaxGCBatch = 5000
-
-	wEntryCnt  = 1 << 0
-	wIndexCnt  = 1 << 1
-	wAccessCnt = 1 << 2
-)
-
-var (
-	dbEntryCount = metrics.NewRegisteredCounter("ldbstore.entryCnt", nil)
-)
-
-var (
-	keyIndex       = byte(0)
-	keyAccessCnt   = []byte{2}
-	keyEntryCnt    = []byte{3}
-	keyDataIdx     = []byte{4}
-	keyData        = byte(6)
-	keyDistanceCnt = byte(7)
-	keySchema      = []byte{8}
-	keyGCIdx       = byte(9) // access to chunk data index, used by garbage collection in ascending order from first entry
-)
-
-var (
-	ErrDBClosed = errors.New("LDBStore closed")
-)
-
-type LDBStoreParams struct {
-	*StoreParams
-	Path string
-	Po   func(Address) uint8
-}
-
-// NewLDBStoreParams constructs LDBStoreParams with the specified values.
-func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
-	return &LDBStoreParams{
-		StoreParams: storeparams,
-		Path:        path,
-		Po:          func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey, k[:])) },
-	}
-}
-
-type garbage struct {
-	maxRound int           // maximum number of chunks to delete in one garbage collection round
-	maxBatch int           // maximum number of chunks to delete in one db request batch
-	ratio    int           // 1/x ratio to calculate the number of chunks to gc on a low capacity db
-	count    int           // number of chunks deleted in running round
-	target   int           // number of chunks to delete in running round
-	batch    *dbBatch      // the delete batch
-	runC     chan struct{} // struct in chan means gc is NOT running
-}
-
-type LDBStore struct {
-	db *LDBDatabase
-
-	// this should be stored in db, accessed transactionally
-	entryCnt  uint64 // number of items in the LevelDB
-	accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
-	dataIdx   uint64 // similar to entryCnt, but we only increment it
-	capacity  uint64
-	bucketCnt []uint64
-
-	hashfunc SwarmHasher
-	po       func(Address) uint8
-
-	batchesC chan struct{}
-	closed   bool
-	batch    *dbBatch
-	lock     sync.RWMutex
-	quit     chan struct{}
-	gc       *garbage
-
-	// Functions encodeDataFunc is used to bypass
-	// the default functionality of DbStore with
-	// mock.NodeStore for testing purposes.
-	encodeDataFunc func(chunk Chunk) []byte
-	// If getDataFunc is defined, it will be used for
-	// retrieving the chunk data instead from the local
-	// LevelDB database.
-	getDataFunc func(key Address) (data []byte, err error)
-}
-
-type dbBatch struct {
-	*leveldb.Batch
-	err error
-	c   chan struct{}
-}
-
-func newBatch() *dbBatch {
-	return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
-}
-
-// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
-// to avoid the appearance of a pluggable distance metric and opportunities of bugs associated with providing
-// a function different from the one that is actually used.
-func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
-	s = new(LDBStore)
-	s.hashfunc = params.Hash
-	s.quit = make(chan struct{})
-
-	s.batchesC = make(chan struct{}, 1)
-	go s.writeBatches()
-	s.batch = newBatch()
-	// associate encodeData with default functionality
-	s.encodeDataFunc = encodeData
-
-	s.db, err = NewLDBDatabase(params.Path)
-	if err != nil {
-		return nil, err
-	}
-
-	s.po = params.Po
-	s.setCapacity(params.DbCapacity)
-
-	s.bucketCnt = make([]uint64, 0x100)
-	for i := 0; i < 0x100; i++ {
-		k := make([]byte, 2)
-		k[0] = keyDistanceCnt
-		k[1] = uint8(i)
-		cnt, _ := s.db.Get(k)
-		s.bucketCnt[i] = BytesToU64(cnt)
-	}
-	data, _ := s.db.Get(keyEntryCnt)
-	s.entryCnt = BytesToU64(data)
-	data, _ = s.db.Get(keyAccessCnt)
-	s.accessCnt = BytesToU64(data)
-	data, _ = s.db.Get(keyDataIdx)
-	s.dataIdx = BytesToU64(data)
-
-	// set up garbage collection
-	s.gc = &garbage{
-		maxBatch: defaultMaxGCBatch,
-		maxRound: defaultMaxGCRound,
-		ratio:    defaultGCRatio,
-	}
-
-	s.gc.runC = make(chan struct{}, 1)
-	s.gc.runC <- struct{}{}
-
-	return s, nil
-}
-
-// MarkAccessed increments the access counter as a best effort for a chunk, so
-// the chunk won't get garbage collected.
-func (s *LDBStore) MarkAccessed(addr Address) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	if s.closed {
-		return
-	}
-
-	proximity := s.po(addr)
-	s.tryAccessIdx(addr, proximity)
-}
-
-// initialize and set values for processing of gc round
-func (s *LDBStore) startGC(c int) {
-
-	s.gc.count = 0
-	// calculate the target number of deletions
-	if c >= s.gc.maxRound {
-		s.gc.target = s.gc.maxRound
-	} else {
-		s.gc.target = c / s.gc.ratio
-	}
-	s.gc.batch = newBatch()
-	log.Debug("startgc", "requested", c, "target", s.gc.target)
-}
-
-// NewMockDbStore creates a new instance of DbStore with
-// mockStore set to a provided value. If mockStore argument is nil,
-// this function behaves exactly as NewDbStore.
-func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
-	s, err = NewLDBStore(params)
-	if err != nil {
-		return nil, err
-	}
-
-	// replace put and get with mock store functionality
-	if mockStore != nil {
-		s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
-		s.getDataFunc = newMockGetDataFunc(mockStore)
-	}
-	return
-}
-
-type dpaDBIndex struct {
-	Idx    uint64
-	Access uint64
-}
-
-func BytesToU64(data []byte) uint64 {
-	if len(data) < 8 {
-		return 0
-	}
-	return binary.BigEndian.Uint64(data)
-}
-
-func U64ToBytes(val uint64) []byte {
-	data := make([]byte, 8)
-	binary.BigEndian.PutUint64(data, val)
-	return data
-}
-
-func getIndexKey(hash Address) []byte {
-	hashSize := len(hash)
-	key := make([]byte, hashSize+1)
-	key[0] = keyIndex
-	copy(key[1:], hash[:])
-	return key
-}
-
-func getDataKey(idx uint64, po uint8) []byte {
-	key := make([]byte, 10)
-	key[0] = keyData
-	key[1] = po
-	binary.BigEndian.PutUint64(key[2:], idx)
-
-	return key
-}
-
-func getGCIdxKey(index *dpaDBIndex) []byte {
-	key := make([]byte, 9)
-	key[0] = keyGCIdx
-	binary.BigEndian.PutUint64(key[1:], index.Access)
-	return key
-}
-
-func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte {
-	val := make([]byte, 41) // po = 1, index.Index = 8, Address = 32
-	val[0] = po
-	binary.BigEndian.PutUint64(val[1:], index.Idx)
-	copy(val[9:], addr)
-	return val
-}
-
-func parseIdxKey(key []byte) (byte, []byte) {
-	return key[0], key[1:]
-}
-
-func parseGCIdxEntry(accessCnt []byte, val []byte) (index *dpaDBIndex, po uint8, addr Address) {
-	index = &dpaDBIndex{
-		Idx:    binary.BigEndian.Uint64(val[1:]),
-		Access: binary.BigEndian.Uint64(accessCnt),
-	}
-	po = val[0]
-	addr = val[9:]
-	return
-}
-
-func encodeIndex(index *dpaDBIndex) []byte {
-	data, _ := rlp.EncodeToBytes(index)
-	return data
-}
-
-func encodeData(chunk Chunk) []byte {
-	// Always create a new underlying array for the returned byte slice.
-	// The chunk.Address array may be used in the returned slice which
-	// may be changed later in the code or by the LevelDB, resulting
-	// that the Address is changed as well.
-	return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...)
-}
-
-func decodeIndex(data []byte, index *dpaDBIndex) error {
-	dec := rlp.NewStream(bytes.NewReader(data), 0)
-	return dec.Decode(index)
-}
-
-func decodeData(addr Address, data []byte) (Chunk, error) {
-	return NewChunk(addr, data[32:]), nil
-}
-
-func (s *LDBStore) collectGarbage() error {
-	// prevent duplicate gc from starting when one is already running
-	select {
-	case <-s.gc.runC:
-	default:
-		return nil
-	}
-
-	s.lock.Lock()
-	entryCnt := s.entryCnt
-	s.lock.Unlock()
-
-	metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)
-
-	// calculate the amount of chunks to collect and reset counter
-	s.startGC(int(entryCnt))
-	log.Debug("collectGarbage", "target", s.gc.target, "entryCnt", entryCnt)
-
-	for s.gc.count < s.gc.target {
-		it := s.db.NewIterator()
-		ok := it.Seek([]byte{keyGCIdx})
-		var singleIterationCount int
-
-		// every batch needs a lock so we avoid entries changing accessidx in the meantime
-		s.lock.Lock()
-		for ; ok && (singleIterationCount < s.gc.maxBatch); ok = it.Next() {
-
-			// quit if no more access index keys
-			itkey := it.Key()
-			if (itkey == nil) || (itkey[0] != keyGCIdx) {
-				break
-			}
-
-			// get chunk data entry from access index
-			val := it.Value()
-			index, po, hash := parseGCIdxEntry(itkey[1:], val)
-			keyIdx := make([]byte, 33)
-			keyIdx[0] = keyIndex
-			copy(keyIdx[1:], hash)
-
-			// add delete operation to batch
-			s.delete(s.gc.batch.Batch, index, keyIdx, po)
-			singleIterationCount++
-			s.gc.count++
-			log.Trace("garbage collect enqueued chunk for deletion", "key", hash)
-
-			// break if target is not on max garbage batch boundary
-			if s.gc.count >= s.gc.target {
-				break
-			}
-		}
-
-		s.writeBatch(s.gc.batch, wEntryCnt)
-		log.Trace("garbage collect batch done", "batch", singleIterationCount, "total", s.gc.count)
-		s.lock.Unlock()
-		it.Release()
-	}
-
-	metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(s.gc.count))
-	log.Debug("garbage collect done", "c", s.gc.count)
-	s.gc.runC <- struct{}{}
-
-	return nil
-}
-
-// Export writes all chunks from the store to a tar archive, returning the
-// number of chunks written.
-func (s *LDBStore) Export(out io.Writer) (int64, error) {
-	tw := tar.NewWriter(out)
-	defer tw.Close()
-
-	it := s.db.NewIterator()
-	defer it.Release()
-	var count int64
-	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
-		key := it.Key()
-		if (key == nil) || (key[0] != keyIndex) {
-			break
-		}
-
-		var index dpaDBIndex
-
-		hash := key[1:]
-		decodeIndex(it.Value(), &index)
-		po := s.po(hash)
-		datakey := getDataKey(index.Idx, po)
-		log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
-		data, err := s.db.Get(datakey)
-		if err != nil {
-			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key, err))
-			continue
-		}
-
-		hdr := &tar.Header{
-			Name: hex.EncodeToString(hash),
-			Mode: 0644,
-			Size: int64(len(data)),
-		}
-		if err := tw.WriteHeader(hdr); err != nil {
-			return count, err
-		}
-		if _, err := tw.Write(data); err != nil {
-			return count, err
-		}
-		count++
-	}
-
-	return count, nil
-}
-
-// Import reads chunks into the store from a tar archive, returning the number
-// of chunks read.
-func (s *LDBStore) Import(in io.Reader) (int64, error) {
-	tr := tar.NewReader(in)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	countC := make(chan int64)
-	errC := make(chan error)
-	var count int64
-	go func() {
-		for {
-			hdr, err := tr.Next()
-			if err == io.EOF {
-				break
-			} else if err != nil {
-				select {
-				case errC <- err:
-				case <-ctx.Done():
-				}
-			}
-
-			if len(hdr.Name) != 64 {
-				log.Warn("ignoring non-chunk file", "name", hdr.Name)
-				continue
-			}
-
-			keybytes, err := hex.DecodeString(hdr.Name)
-			if err != nil {
-				log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
-				continue
-			}
-
-			data, err := ioutil.ReadAll(tr)
-			if err != nil {
-				select {
-				case errC <- err:
-				case <-ctx.Done():
-				}
-			}
-			key := Address(keybytes)
-			chunk := NewChunk(key, data[32:])
-
-			go func() {
-				select {
-				case errC <- s.Put(ctx, chunk):
-				case <-ctx.Done():
-				}
-			}()
-
-			count++
-		}
-		countC <- count
-	}()
-
-	// wait for all chunks to be stored
-	i := int64(0)
-	var total int64
-	for {
-		select {
-		case err := <-errC:
-			if err != nil {
-				return count, err
-			}
-			i++
-		case total = <-countC:
-		case <-ctx.Done():
-			return i, ctx.Err()
-		}
-		if total > 0 && i == total {
-			return total, nil
-		}
-	}
-}
-
-// Cleanup iterates over the database and deletes chunks if they pass the `f` condition
-func (s *LDBStore) Cleanup(f func(Chunk) bool) {
-	var errorsFound, removed, total int
-
-	it := s.db.NewIterator()
-	defer it.Release()
-	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
-		key := it.Key()
-		if (key == nil) || (key[0] != keyIndex) {
-			break
-		}
-		total++
-		var index dpaDBIndex
-		err := decodeIndex(it.Value(), &index)
-		if err != nil {
-			log.Warn("Cannot decode")
-			errorsFound++
-			continue
-		}
-		hash := key[1:]
-		po := s.po(hash)
-		datakey := getDataKey(index.Idx, po)
-		data, err := s.db.Get(datakey)
-		if err != nil {
-			found := false
-
-			// The highest possible proximity is 255, so exit loop upon overflow.
-			for po = uint8(1); po != 0; po++ {
-				datakey = getDataKey(index.Idx, po)
-				data, err = s.db.Get(datakey)
-				if err == nil {
-					found = true
-					break
-				}
-			}
-
-			if !found {
-				log.Warn(fmt.Sprintf("Chunk %x found but count not be accessed with any po", key))
-				errorsFound++
-				continue
-			}
-		}
-
-		ck := data[:32]
-		c, err := decodeData(ck, data)
-		if err != nil {
-			log.Error("decodeData error", "err", err)
-			continue
-		}
-
-		sdata := c.Data()
-
-		cs := int64(binary.LittleEndian.Uint64(sdata[:8]))
-		log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(sdata), "size", cs)
-
-		// if chunk is to be removed
-		if f(c) {
-			log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(sdata), "size", cs)
-			s.deleteNow(&index, getIndexKey(key[1:]), po)
-			removed++
-			errorsFound++
-		}
-	}
-
-	log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
-}
-
-// CleanGCIndex rebuilds the garbage collector index from scratch, while
-// removing inconsistent elements, e.g., indices with missing data chunks.
-// WARN: it's a pretty heavy, long running function.
-func (s *LDBStore) CleanGCIndex() error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	batch := leveldb.Batch{}
-
-	var okEntryCount uint64
-	var totalEntryCount uint64
-
-	// throw out all gc indices, we will rebuild from cleaned index
-	it := s.db.NewIterator()
-	it.Seek([]byte{keyGCIdx})
-	var gcDeletes int
-	for it.Valid() {
-		rowType, _ := parseIdxKey(it.Key())
-		if rowType != keyGCIdx {
-			break
-		}
-		batch.Delete(it.Key())
-		gcDeletes++
-		it.Next()
-	}
-	log.Debug("gc", "deletes", gcDeletes)
-	if err := s.db.Write(&batch); err != nil {
-		return err
-	}
-	batch.Reset()
-
-	it.Release()
-
-	// corrected po index pointer values
-	var poPtrs [256]uint64
-
-	// set to true if chunk count not on 4096 iteration boundary
-	var doneIterating bool
-
-	// last key index in previous iteration
-	lastIdxKey := []byte{keyIndex}
-
-	// counter for debug output
-	var cleanBatchCount int
-
-	// go through all key index entries
-	for !doneIterating {
-		cleanBatchCount++
-		var idxs []dpaDBIndex
-		var chunkHashes [][]byte
-		var pos []uint8
-		it := s.db.NewIterator()
-
-		it.Seek(lastIdxKey)
-
-		// 4096 is just a nice number, don't look for any hidden meaning here...
-		var i int
-		for i = 0; i < 4096; i++ {
-
-			// this really shouldn't happen unless database is empty
-			// but let's keep it to be safe
-			if !it.Valid() {
-				doneIterating = true
-				break
-			}
-
-			// if it's not keyindex anymore we're done iterating
-			rowType, chunkHash := parseIdxKey(it.Key())
-			if rowType != keyIndex {
-				doneIterating = true
-				break
-			}
-
-			// decode the retrieved index
-			var idx dpaDBIndex
-			err := decodeIndex(it.Value(), &idx)
-			if err != nil {
-				return fmt.Errorf("corrupt index: %v", err)
-			}
-			po := s.po(chunkHash)
-			lastIdxKey = it.Key()
-
-			// if we don't find the data key, remove the entry
-			// if we find it, add to the array of new gc indices to create
-			dataKey := getDataKey(idx.Idx, po)
-			_, err = s.db.Get(dataKey)
-			if err != nil {
-				log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
-				batch.Delete(it.Key())
-			} else {
-				idxs = append(idxs, idx)
-				chunkHashes = append(chunkHashes, chunkHash)
-				pos = append(pos, po)
-				okEntryCount++
-				if idx.Idx > poPtrs[po] {
-					poPtrs[po] = idx.Idx
-				}
-			}
-			totalEntryCount++
-			it.Next()
-		}
-		it.Release()
-
-		// flush the key index corrections
-		err := s.db.Write(&batch)
-		if err != nil {
-			return err
-		}
-		batch.Reset()
-
-		// add correct gc indices
-		for i, okIdx := range idxs {
-			gcIdxKey := getGCIdxKey(&okIdx)
-			gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])
-			batch.Put(gcIdxKey, gcIdxData)
-			log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData)
-		}
-
-		// flush them
-		err = s.db.Write(&batch)
-		if err != nil {
-			return err
-		}
-		batch.Reset()
-
-		log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs))
-	}
-
-	log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len())
-
-	// lastly add updated entry count
-	var entryCount [8]byte
-	binary.BigEndian.PutUint64(entryCount[:], okEntryCount)
-	batch.Put(keyEntryCnt, entryCount[:])
-
-	// and add the new po index pointers
-	var poKey [2]byte
-	poKey[0] = keyDistanceCnt
-	for i, poPtr := range poPtrs {
-		poKey[1] = uint8(i)
-		if poPtr == 0 {
-			batch.Delete(poKey[:])
-		} else {
-			var idxCount [8]byte
-			binary.BigEndian.PutUint64(idxCount[:], poPtr)
-			batch.Put(poKey[:], idxCount[:])
-		}
-	}
-
-	// if you made it this far your harddisk has survived. Congratulations
-	return s.db.Write(&batch)
-}
-
-// Delete is removes a chunk and updates indices.
-// Is thread safe
-func (s *LDBStore) Delete(addr Address) error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	ikey := getIndexKey(addr)
-
-	idata, err := s.db.Get(ikey)
-	if err != nil {
-		return err
-	}
-
-	var idx dpaDBIndex
-	decodeIndex(idata, &idx)
-	proximity := s.po(addr)
-	return s.deleteNow(&idx, ikey, proximity)
-}
-
-// executes one delete operation immediately
-// see *LDBStore.delete
-func (s *LDBStore) deleteNow(idx *dpaDBIndex, idxKey []byte, po uint8) error {
-	batch := new(leveldb.Batch)
-	s.delete(batch, idx, idxKey, po)
-	return s.db.Write(batch)
-}
-
-// adds a delete chunk operation to the provided batch
-// if called directly, decrements entrycount regardless if the chunk exists upon deletion. Risk of wrap to max uint64
-func (s *LDBStore) delete(batch *leveldb.Batch, idx *dpaDBIndex, idxKey []byte, po uint8) {
-	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)
-
-	gcIdxKey := getGCIdxKey(idx)
-	batch.Delete(gcIdxKey)
-	dataKey := getDataKey(idx.Idx, po)
-	batch.Delete(dataKey)
-	batch.Delete(idxKey)
-	s.entryCnt--
-	dbEntryCount.Dec(1)
-	cntKey := make([]byte, 2)
-	cntKey[0] = keyDistanceCnt
-	cntKey[1] = po
-	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
-	batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
-}
-
-func (s *LDBStore) BinIndex(po uint8) uint64 {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	return s.bucketCnt[po]
-}
-
-// Put adds a chunk to the database, adding indices and incrementing global counters.
-// If it already exists, it merely increments the access count of the existing entry.
-// Is thread safe
-func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
-	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
-	log.Trace("ldbstore.put", "key", chunk.Address())
-
-	ikey := getIndexKey(chunk.Address())
-	var index dpaDBIndex
-
-	po := s.po(chunk.Address())
-
-	s.lock.Lock()
-
-	if s.closed {
-		s.lock.Unlock()
-		return ErrDBClosed
-	}
-	batch := s.batch
-
-	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
-	_, err := s.db.Get(ikey)
-	if err != nil {
-		s.doPut(chunk, &index, po)
-	}
-	idata := encodeIndex(&index)
-	s.batch.Put(ikey, idata)
-
-	// add the access-chunkindex index for garbage collection
-	gcIdxKey := getGCIdxKey(&index)
-	gcIdxData := getGCIdxValue(&index, po, chunk.Address())
-	s.batch.Put(gcIdxKey, gcIdxData)
-	s.lock.Unlock()
-
-	select {
-	case s.batchesC <- struct{}{}:
-	default:
-	}
-
-	select {
-	case <-batch.c:
-		return batch.err
-	case <-ctx.Done():
-		return ctx.Err()
-	}
-}
-
-// force putting into db, does not check or update necessary indices
-func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
-	data := s.encodeDataFunc(chunk)
-	dkey := getDataKey(s.dataIdx, po)
-	s.batch.Put(dkey, data)
-	index.Idx = s.dataIdx
-	s.bucketCnt[po] = s.dataIdx
-	s.entryCnt++
-	dbEntryCount.Inc(1)
-	s.dataIdx++
-	index.Access = s.accessCnt
-	s.accessCnt++
-	cntKey := make([]byte, 2)
-	cntKey[0] = keyDistanceCnt
-	cntKey[1] = po
-	s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
-}
-
-func (s *LDBStore) writeBatches() {
-	for {
-		select {
-		case <-s.quit:
-			log.Debug("DbStore: quit batch write loop")
-			return
-		case <-s.batchesC:
-			err := s.writeCurrentBatch()
-			if err != nil {
-				log.Debug("DbStore: quit batch write loop", "err", err.Error())
-				return
-			}
-		}
-	}
-
-}
-
-func (s *LDBStore) writeCurrentBatch() error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-	b := s.batch
-	l := b.Len()
-	if l == 0 {
-		return nil
-	}
-	s.batch = newBatch()
-	b.err = s.writeBatch(b, wEntryCnt|wAccessCnt|wIndexCnt)
-	close(b.c)
-	if s.entryCnt >= s.capacity {
-		go s.collectGarbage()
-	}
-	return nil
-}
-
-// must be called non concurrently
-func (s *LDBStore) writeBatch(b *dbBatch, wFlag uint8) error {
-	if wFlag&wEntryCnt > 0 {
-		b.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
-	}
-	if wFlag&wIndexCnt > 0 {
-		b.Put(keyDataIdx, U64ToBytes(s.dataIdx))
-	}
-	if wFlag&wAccessCnt > 0 {
-		b.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
-	}
-	l := b.Len()
-	if err := s.db.Write(b.Batch); err != nil {
-		return fmt.Errorf("unable to write batch: %v", err)
-	}
-	log.Trace(fmt.Sprintf("batch write (%d entries)", l))
-	return nil
-}
-
-// newMockEncodeDataFunc returns a function that stores the chunk data
-// to a mock store to bypass the default functionality encodeData.
-// The constructed function always returns the nil data, as DbStore does
-// not need to store the data, but still need to create the index.
-func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
-	return func(chunk Chunk) []byte {
-		if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil {
-			log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err))
-		}
-		return chunk.Address()[:]
-	}
-}
-
-// tryAccessIdx tries to find index entry. If found then increments the access
-// count for garbage collection and returns the index entry and true for found,
-// otherwise returns nil and false.
-func (s *LDBStore) tryAccessIdx(addr Address, po uint8) (*dpaDBIndex, bool) {
-	ikey := getIndexKey(addr)
-	idata, err := s.db.Get(ikey)
-	if err != nil {
-		return nil, false
-	}
-
-	index := new(dpaDBIndex)
-	decodeIndex(idata, index)
-	oldGCIdxKey := getGCIdxKey(index)
-	s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
-	index.Access = s.accessCnt
-	idata = encodeIndex(index)
-	s.accessCnt++
-	s.batch.Put(ikey, idata)
-	newGCIdxKey := getGCIdxKey(index)
-	newGCIdxData := getGCIdxValue(index, po, ikey[1:])
-	s.batch.Delete(oldGCIdxKey)
-	s.batch.Put(newGCIdxKey, newGCIdxData)
-	select {
-	case s.batchesC <- struct{}{}:
-	default:
-	}
-	return index, true
-}
-
-// GetSchema is returning the current named schema of the datastore as read from LevelDB
-func (s *LDBStore) GetSchema() (string, error) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	data, err := s.db.Get(keySchema)
-	if err != nil {
-		if err == leveldb.ErrNotFound {
-			return DbSchemaNone, nil
-		}
-		return "", err
-	}
-
-	return string(data), nil
-}
-
-// PutSchema is saving a named schema to the LevelDB datastore
-func (s *LDBStore) PutSchema(schema string) error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	return s.db.Put(keySchema, []byte(schema))
-}
-
-// Get retrieves the chunk matching the provided key from the database.
-// If the chunk entry does not exist, it returns an error
-// Updates access count and is thread safe
-func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
-	metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
-	log.Trace("ldbstore.get", "key", addr)
-
-	s.lock.Lock()
-	defer s.lock.Unlock()
-	return s.get(addr)
-}
-
-// Has queries the underlying DB if a chunk with the given address is stored
-// Returns true if the chunk is found, false if not
-func (s *LDBStore) Has(_ context.Context, addr Address) bool {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-
-	ikey := getIndexKey(addr)
-	_, err := s.db.Get(ikey)
-
-	return err == nil
-}
-
-// TODO: To conform with other private methods of this object indices should not be updated
-func (s *LDBStore) get(addr Address) (chunk Chunk, err error) {
-	if s.closed {
-		return nil, ErrDBClosed
-	}
-	proximity := s.po(addr)
-	index, found := s.tryAccessIdx(addr, proximity)
-	if found {
-		var data []byte
-		if s.getDataFunc != nil {
-			// if getDataFunc is defined, use it to retrieve the chunk data
-			log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
-			data, err = s.getDataFunc(addr)
-			if err != nil {
-				return
-			}
-		} else {
-			// default DbStore functionality to retrieve chunk data
-			datakey := getDataKey(index.Idx, proximity)
-			data, err = s.db.Get(datakey)
-			log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", index.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
-			if err != nil {
-				log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
-				s.deleteNow(index, getIndexKey(addr), s.po(addr))
-				if err == leveldb.ErrNotFound {
-					return nil, ErrChunkNotFound
-				}
-				return nil, err
-			}
-		}
-
-		return decodeData(addr, data)
-	} else {
-		err = ErrChunkNotFound
-	}
-
-	return
-}
-
-// newMockGetFunc returns a function that reads chunk data from
-// the mock database, which is used as the value for DbStore.getFunc
-// to bypass the default functionality of DbStore with a mock store.
-func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
-	return func(addr Address) (data []byte, err error) {
-		data, err = mockStore.Get(addr)
-		if err == mock.ErrNotFound {
-			// preserve ErrChunkNotFound error
-			err = ErrChunkNotFound
-		}
-		return data, err
-	}
-}
-
-func (s *LDBStore) setCapacity(c uint64) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	s.capacity = c
-
-	for s.entryCnt > c {
-		s.collectGarbage()
-	}
-}
-
-func (s *LDBStore) Close() {
-	close(s.quit)
-	s.lock.Lock()
-	s.closed = true
-	s.lock.Unlock()
-	// force writing out current batch
-	s.writeCurrentBatch()
-	s.db.Close()
-}
-
-// SyncIterator(start, stop, po, f) calls f on each hash of a bin po from start to stop
-func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
-	metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)
-
-	sincekey := getDataKey(since, po)
-	untilkey := getDataKey(until, po)
-	it := s.db.NewIterator()
-	defer it.Release()
-
-	for ok := it.Seek(sincekey); ok; ok = it.Next() {
-		metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)
-
-		dbkey := it.Key()
-		if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
-			break
-		}
-		key := make([]byte, 32)
-		val := it.Value()
-		copy(key, val[:32])
-		if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
-			break
-		}
-	}
-	return it.Error()
-}
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
deleted file mode 100644
index 1cd4947be..000000000
--- a/swarm/storage/ldbstore_test.go
+++ /dev/null
@@ -1,788 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
-	"bytes"
-	"context"
-	"encoding/binary"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"strconv"
-	"strings"
-	"testing"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/swarm/chunk"
-	"github.com/ethereum/go-ethereum/swarm/log"
-	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
-	"github.com/ethereum/go-ethereum/swarm/testutil"
-	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
-)
-
-type testDbStore struct {
-	*LDBStore
-	dir string
-}
-
-func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
-	dir, err := ioutil.TempDir("", "bzz-storage-test")
-	if err != nil {
-		return nil, func() {}, err
-	}
-
-	var db *LDBStore
-	storeparams := NewDefaultStoreParams()
-	params := NewLDBStoreParams(storeparams, dir)
-	params.Po = testPoFunc
-
-	if mock {
-		globalStore := mem.NewGlobalStore()
-		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
-		mockStore := globalStore.NewNodeStore(addr)
-
-		db, err = NewMockDbStore(params, mockStore)
-	} else {
-		db, err = NewLDBStore(params)
-	}
-
-	cleanup := func() {
-		if db != nil {
-			db.Close()
-		}
-		err = os.RemoveAll(dir)
-		if err != nil {
-			panic(fmt.Sprintf("db cleanup failed: %v", err))
-		}
-	}
-
-	return &testDbStore{db, dir}, cleanup, err
-}
-
-func testPoFunc(k Address) (ret uint8) {
-	basekey := make([]byte, 32)
-	return uint8(Proximity(basekey, k[:]))
-}
-
-func testDbStoreRandom(n int, mock bool, t *testing.T) {
-	db, cleanup, err := newTestDbStore(mock, true)
-	defer cleanup()
-	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
-	}
-	testStoreRandom(db, n, t)
-}
-
-func testDbStoreCorrect(n int, mock bool, t *testing.T) {
-	db, cleanup, err := newTestDbStore(mock, false)
-	defer cleanup()
-	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
-	}
-	testStoreCorrect(db, n, t)
-}
-
-func TestMarkAccessed(t *testing.T) {
-	db, cleanup, err := newTestDbStore(false, true)
-	defer cleanup()
-	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
-	}
-
-	h := GenerateRandomChunk(chunk.DefaultSize)
-
-	db.Put(context.Background(), h)
-
-	var index dpaDBIndex
-	addr := h.Address()
-	idxk := getIndexKey(addr)
-
-	idata, err := db.db.Get(idxk)
-	if err != nil {
-		t.Fatal(err)
-	}
-	decodeIndex(idata, &index)
-
-	if index.Access != 0 {
-		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
-	}
-
-	db.MarkAccessed(addr)
-	db.writeCurrentBatch()
-
-	idata, err = db.db.Get(idxk)
-	if err != nil {
-		t.Fatal(err)
-	}
-	decodeIndex(idata, &index)
-
-	if index.Access != 1 {
-		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
-	}
-
-}
-
-func TestDbStoreRandom_1(t *testing.T) {
-	testDbStoreRandom(1, false, t)
-}
-
-func TestDbStoreCorrect_1(t *testing.T) {
-	testDbStoreCorrect(1, false, t)
-}
-
-func TestDbStoreRandom_1k(t *testing.T) {
-	testDbStoreRandom(1000, false, t)
-}
-
-func TestDbStoreCorrect_1k(t *testing.T) {
-	testDbStoreCorrect(1000, false, t)
-}
-
-func TestMockDbStoreRandom_1(t *testing.T) {
-	testDbStoreRandom(1, true, t)
-}
-
-func TestMockDbStoreCorrect_1(t *testing.T) {
-	testDbStoreCorrect(1, true, t)
-}
-
-func TestMockDbStoreRandom_1k(t *testing.T) {
-	testDbStoreRandom(1000, true, t)
-}
-
-func TestMockDbStoreCorrect_1k(t *testing.T) {
-	testDbStoreCorrect(1000, true, t)
-}
-
-func testDbStoreNotFound(t *testing.T, mock bool) {
-	db, cleanup, err := newTestDbStore(mock, false)
-	defer cleanup()
-	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
-	}
-
-	_, err = db.Get(context.TODO(), ZeroAddr)
-	if err != ErrChunkNotFound {
-		t.Errorf("Expected ErrChunkNotFound, got %v", err)
-	}
-}
-
-func TestDbStoreNotFound(t *testing.T) {
-	testDbStoreNotFound(t, false)
-}
-func TestMockDbStoreNotFound(t *testing.T) {
-	testDbStoreNotFound(t, true)
-}
-
-func testIterator(t *testing.T, mock bool) {
-	var i int
-	var poc uint
-	chunkcount := 32
-	chunkkeys := NewAddressCollection(chunkcount)
-	chunkkeysResults := NewAddressCollection(chunkcount)
-
-	db, cleanup, err := newTestDbStore(mock, false)
-	defer cleanup()
-	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
-	}
-
-	chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)
-
-	for i = 0; i < len(chunks); i++ {
-		chunkkeys[i] = chunks[i].Address()
-		err := db.Put(context.TODO(), chunks[i])
-		if err != nil {
-			t.Fatalf("dbStore.Put failed: %v", err)
-		}
-	}
-
-	for i = 0; i < len(chunkkeys); i++ {
-		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
-	}
-	i = 0
-	for poc = 0; poc <= 255; poc++ {
-		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
-			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
-			chunkkeysResults[n] = k
-			i++
-			return true
-		})
-		if err != nil {
-			t.Fatalf("Iterator call failed: %v", err)
-		}
-	}
-
-	for i = 0; i < chunkcount; i++ {
-		if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
-			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
-		}
-	}
-
-}
-
-func TestIterator(t *testing.T) {
-	testIterator(t, false)
-}
-func TestMockIterator(t *testing.T) {
-	testIterator(t, true)
-}
-
-func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
-	db, cleanup, err := newTestDbStore(mock, true)
-	defer cleanup()
-	if err != nil {
-		b.Fatalf("init dbStore failed: %v", err)
-	}
-	benchmarkStorePut(db, n, b)
-}
-
-func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
-	db, cleanup, err := newTestDbStore(mock, true)
-	defer cleanup()
-	if err != nil {
-		b.Fatalf("init dbStore failed: %v", err)
-	}
-	benchmarkStoreGet(db, n, b)
-}
-
-func BenchmarkDbStorePut_500(b *testing.B) {
-	benchmarkDbStorePut(500, false, b)
-}
-
-func BenchmarkDbStoreGet_500(b *testing.B) {
-	benchmarkDbStoreGet(500, false, b)
-}
-
-func BenchmarkMockDbStorePut_500(b *testing.B) {
-	benchmarkDbStorePut(500, true, b)
-}
-
-func BenchmarkMockDbStoreGet_500(b *testing.B) {
-	benchmarkDbStoreGet(500, true, b)
-}
-
-// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
-// retrieve them, provided we don't hit the garbage collection
-func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
-	capacity := 50
-	n := 10
-
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(uint64(capacity))
-	defer cleanup()
-
-	chunks, err := mputRandomChunks(ldb, n)
-	if err != nil {
-		t.Fatal(err.Error())
-	}
-
-	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
-	for _, ch := range chunks {
-		ret, err := ldb.Get(context.TODO(), ch.Address())
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if !bytes.Equal(ret.Data(), ch.Data()) {
-			t.Fatal("expected to get the same data back, but got smth else")
-		}
-	}
-
-	if ldb.entryCnt != uint64(n) {
-		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
-	}
-
-	if ldb.accessCnt != uint64(2*n) {
-		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
-	}
-}
-
-// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
-// retrieve only some of them, because garbage collection must have partially cleared the store
-// Also tests that we can delete chunks and that we can trigger garbage collection
-func TestLDBStoreCollectGarbage(t *testing.T) {
-
-	// below max ronud
-	initialCap := defaultMaxGCRound / 100
-	cap := initialCap / 2
-	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
-
-	if testutil.RaceEnabled {
-		t.Skip("only the simplest case run as others are flaky with race")
-		// Note: some tests fail consistently and even locally with `-race`
-	}
-
-	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
-	// at max round
-	cap = initialCap
-	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
-	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
-	// more than max around, not on threshold
-	cap = initialCap + 500
-	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
-	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
-}
-
-func testLDBStoreCollectGarbage(t *testing.T) {
-	params := strings.Split(t.Name(), "/")
-	capacity, err := strconv.Atoi(params[2])
-	if err != nil {
-		t.Fatal(err)
-	}
-	n, err := strconv.Atoi(params[3])
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(uint64(capacity))
-	defer cleanup()
-
-	// retrieve the gc round target count for the db capacity
-	ldb.startGC(capacity)
-	roundTarget := ldb.gc.target
-
-	// split put counts to gc target count threshold, and wait for gc to finish in between
-	var allChunks []Chunk
-	remaining := n
-	for remaining > 0 {
-		var putCount int
-		if remaining < roundTarget {
-			putCount = remaining
-		} else {
-			putCount = roundTarget
-		}
-		remaining -= putCount
-		chunks, err := mputRandomChunks(ldb, putCount)
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-		allChunks = append(allChunks, chunks...)
-		ldb.lock.RLock()
-		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
-		ldb.lock.RUnlock()
-
-		waitGc(ldb)
-	}
-
-	// attempt gets on all put chunks
-	var missing int
-	for _, ch := range allChunks {
-		ret, err := ldb.Get(context.TODO(), ch.Address())
-		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
-			missing++
-			continue
-		}
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if !bytes.Equal(ret.Data(), ch.Data()) {
-			t.Fatal("expected to get the same data back, but got smth else")
-		}
-
-		log.Trace("got back chunk", "chunk", ret)
-	}
-
-	// all surplus chunks should be missing
-	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
-	if missing != expectMissing {
-		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
-	}
-
-	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-}
-
-// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
-func TestLDBStoreAddRemove(t *testing.T) {
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(200)
-	defer cleanup()
-
-	n := 100
-	chunks, err := mputRandomChunks(ldb, n)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-
-	for i := 0; i < n; i++ {
-		// delete all even index chunks
-		if i%2 == 0 {
-			ldb.Delete(chunks[i].Address())
-		}
-	}
-
-	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
-	for i := 0; i < n; i++ {
-		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
-
-		if i%2 == 0 {
-			// expect even chunks to be missing
-			if err == nil {
-				t.Fatal("expected chunk to be missing, but got no error")
-			}
-		} else {
-			// expect odd chunks to be retrieved successfully
-			if err != nil {
-				t.Fatalf("expected no error, but got %s", err)
-			}
-
-			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
-				t.Fatal("expected to get the same data back, but got smth else")
-			}
-		}
-	}
-}
-
-func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
-	t.Skip("flaky with -race flag")
-
-	params := strings.Split(t.Name(), "/")
-	capacity, err := strconv.Atoi(params[2])
-	if err != nil {
-		t.Fatal(err)
-	}
-	n, err := strconv.Atoi(params[3])
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	ldb, cleanup := newLDBStore(t)
-	defer cleanup()
-	ldb.setCapacity(uint64(capacity))
-
-	// generate and put n chunks
-	chunks := make([]Chunk, n)
-	for i := 0; i < n; i++ {
-		c := GenerateRandomChunk(chunk.DefaultSize)
-		chunks[i] = c
-		log.Trace("generate random chunk", "idx", i, "chunk", c)
-	}
-
-	for i := 0; i < n; i++ {
-		err := ldb.Put(context.TODO(), chunks[i])
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	waitGc(ldb)
-
-	// delete all chunks
-	// (only count the ones actually deleted, the rest will have been gc'd)
-	deletes := 0
-	for i := 0; i < n; i++ {
-		if ldb.Delete(chunks[i].Address()) == nil {
-			deletes++
-		}
-	}
-
-	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
-	if ldb.entryCnt != 0 {
-		t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt)
-	}
-
-	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
-	expAccessCnt := uint64(n)
-	if ldb.accessCnt != expAccessCnt {
-		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
-	}
-
-	// retrieve the gc round target count for the db capacity
-	ldb.startGC(capacity)
-	roundTarget := ldb.gc.target
-
-	remaining := n
-	var puts int
-	for remaining > 0 {
-		var putCount int
-		if remaining < roundTarget {
-			putCount = remaining
-		} else {
-			putCount = roundTarget
-		}
-		remaining -= putCount
-		for putCount > 0 {
-			ldb.Put(context.TODO(), chunks[puts])
-			ldb.lock.RLock()
-			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
-			ldb.lock.RUnlock()
-			puts++
-			putCount--
-		}
-
-		waitGc(ldb)
-	}
-
-	// expect first surplus chunks to be missing, because they have the smallest access value
-	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
-	for i := 0; i < expectMissing; i++ {
-		_, err := ldb.Get(context.TODO(), chunks[i].Address())
-		if err == nil {
-			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
-		}
-	}
-
-	// expect last chunks to be present, as they have the largest access value
-	for i := expectMissing; i < n; i++ {
-		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
-		if err != nil {
-			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
-		}
-		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
-			t.Fatal("expected to get the same data back, but got smth else")
-		}
-	}
-}
-
-// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
-func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
-
-	capacity := defaultMaxGCRound / 100 * 2
-	n := capacity - 1
-
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(uint64(capacity))
-	defer cleanup()
-
-	chunks, err := mputRandomChunks(ldb, n)
-	if err != nil {
-		t.Fatal(err.Error())
-	}
-	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
-	// set first added capacity/2 chunks to highest accesscount
-	for i := 0; i < capacity/2; i++ {
-		_, err := ldb.Get(context.TODO(), chunks[i].Address())
-		if err != nil {
-			t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
-		}
-	}
-	_, err = mputRandomChunks(ldb, 2)
-	if err != nil {
-		t.Fatal(err.Error())
-	}
-
-	// wait for garbage collection to kick in on the responsible actor
-	waitGc(ldb)
-
-	var missing int
-	for i, ch := range chunks[2 : capacity/2] {
-		ret, err := ldb.Get(context.TODO(), ch.Address())
-		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
-			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
-		}
-
-		if !bytes.Equal(ret.Data(), ch.Data()) {
-			t.Fatal("expected to get the same data back, but got smth else")
-		}
-		log.Trace("got back chunk", "chunk", ret)
-	}
-
-	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-}
-
-func TestCleanIndex(t *testing.T) {
-	if testutil.RaceEnabled {
-		t.Skip("disabled because it times out with race detector")
-	}
-
-	capacity := 5000
-	n := 3
-
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(uint64(capacity))
-	defer cleanup()
-
-	chunks, err := mputRandomChunks(ldb, n)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// remove the data of the first chunk
-	po := ldb.po(chunks[0].Address()[:])
-	dataKey := make([]byte, 10)
-	dataKey[0] = keyData
-	dataKey[1] = byte(po)
-	// dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
-	if _, err := ldb.db.Get(dataKey); err != nil {
-		t.Fatal(err)
-	}
-	if err := ldb.db.Delete(dataKey); err != nil {
-		t.Fatal(err)
-	}
-
-	// remove the gc index row for the first chunk
-	gcFirstCorrectKey := make([]byte, 9)
-	gcFirstCorrectKey[0] = keyGCIdx
-	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
-		t.Fatal(err)
-	}
-
-	// warp the gc data of the second chunk
-	// this data should be correct again after the clean
-	gcSecondCorrectKey := make([]byte, 9)
-	gcSecondCorrectKey[0] = keyGCIdx
-	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
-	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
-	if err != nil {
-		t.Fatal(err)
-	}
-	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
-	copy(warpedGCVal[1:], gcSecondCorrectVal)
-	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
-		t.Fatal(err)
-	}
-	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := ldb.CleanGCIndex(); err != nil {
-		t.Fatal(err)
-	}
-
-	// the index without corresponding data should have been deleted
-	idxKey := make([]byte, 33)
-	idxKey[0] = keyIndex
-	copy(idxKey[1:], chunks[0].Address())
-	if _, err := ldb.db.Get(idxKey); err == nil {
-		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
-	}
-
-	// the two other indices should be present
-	copy(idxKey[1:], chunks[1].Address())
-	if _, err := ldb.db.Get(idxKey); err != nil {
-		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
-	}
-
-	copy(idxKey[1:], chunks[2].Address())
-	if _, err := ldb.db.Get(idxKey); err != nil {
-		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
-	}
-
-	// first gc index should still be gone
-	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
-		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
-	}
-
-	// second gc index should still be fixed
-	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
-		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
-	}
-
-	// third gc index should be unchanged
-	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
-	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
-		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
-	}
-
-	c, err := ldb.db.Get(keyEntryCnt)
-	if err != nil {
-		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
-	}
-
-	// entrycount should now be one less
-	entryCount := binary.BigEndian.Uint64(c)
-	if entryCount != 2 {
-		t.Fatalf("expected entrycnt to be 2, was %d", c)
-	}
-
-	// the chunks might accidentally be in the same bin;
-	// if so, that bin counter will now be 2 (the highest added index).
-	// if not, the total of the bin counters will be 3
-	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
-	if poBins[0] == poBins[1] {
-		poBins = poBins[:1]
-	}
-
-	var binTotal uint64
-	var currentBin [2]byte
-	currentBin[0] = keyDistanceCnt
-	if len(poBins) == 1 {
-		currentBin[1] = poBins[0]
-		c, err := ldb.db.Get(currentBin[:])
-		if err != nil {
-			t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
-		}
-		binCount := binary.BigEndian.Uint64(c)
-		if binCount != 2 {
-			t.Fatalf("expected entrycnt to be 2, was %d", binCount)
-		}
-	} else {
-		for _, bin := range poBins {
-			currentBin[1] = bin
-			c, err := ldb.db.Get(currentBin[:])
-			if err != nil {
-				t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
-			}
-			binCount := binary.BigEndian.Uint64(c)
-			binTotal += binCount
-
-		}
-		if binTotal != 3 {
-			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
-		}
-	}
-
-	// check that the iterator quits properly
-	chunks, err = mputRandomChunks(ldb, 4100)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	po = ldb.po(chunks[4099].Address()[:])
-	dataKey = make([]byte, 10)
-	dataKey[0] = keyData
-	dataKey[1] = byte(po)
-	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
-	if _, err := ldb.db.Get(dataKey); err != nil {
-		t.Fatal(err)
-	}
-	if err := ldb.db.Delete(dataKey); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := ldb.CleanGCIndex(); err != nil {
-		t.Fatal(err)
-	}
-
-	// entrycount should now be one less than the number of added chunks
-	c, err = ldb.db.Get(keyEntryCnt)
-	if err != nil {
-		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
-	}
-	entryCount = binary.BigEndian.Uint64(c)
-	if entryCount != 4099+2 {
-		t.Fatalf("expected entrycnt to be 2, was %d", c)
-	}
-}
-
-// Note: waitGc does not guarantee that we wait 1 GC round; it only
-// guarantees that if the GC is running we wait for that run to finish
-// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
-func waitGc(ldb *LDBStore) {
-	<-ldb.gc.runC
-	ldb.gc.runC <- struct{}{}
-}
diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go
deleted file mode 100644
index a8f6f037f..000000000
--- a/swarm/storage/localstore.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
-	"context"
-	"path/filepath"
-	"sync"
-
-	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/ethereum/go-ethereum/swarm/log"
-	"github.com/ethereum/go-ethereum/swarm/storage/mock"
-)
-
-type LocalStoreParams struct {
-	*StoreParams
-	ChunkDbPath string
-	Validators  []ChunkValidator `toml:"-"`
-}
-
-func NewDefaultLocalStoreParams() *LocalStoreParams {
-	return &LocalStoreParams{
-		StoreParams: NewDefaultStoreParams(),
-	}
-}
-
-// This can only be set after all config options (file, command line, env vars)
-// have been evaluated.
-func (p *LocalStoreParams) Init(path string) {
-	if p.ChunkDbPath == "" {
-		p.ChunkDbPath = filepath.Join(path, "chunks")
-	}
-}
-
-// LocalStore is a combination of an in-memory db over a disk-persisted db.
-// It implements Get/Put with fallback (caching) logic using any 2 ChunkStores.
-type LocalStore struct {
-	Validators []ChunkValidator
-	memStore   *MemStore
-	DbStore    *LDBStore
-	mu         sync.Mutex
-}
-
-// This constructor uses MemStore and DbStore as components
-func NewLocalStore(params *LocalStoreParams, mockStore *mock.NodeStore) (*LocalStore, error) {
-	ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
-	dbStore, err := NewMockDbStore(ldbparams, mockStore)
-	if err != nil {
-		return nil, err
-	}
-	return &LocalStore{
-		memStore:   NewMemStore(params.StoreParams, dbStore),
-		DbStore:    dbStore,
-		Validators: params.Validators,
-	}, nil
-}
-
-func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) {
-	ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
-	dbStore, err := NewLDBStore(ldbparams)
-	if err != nil {
-		return nil, err
-	}
-	localStore := &LocalStore{
-		memStore:   NewMemStore(params.StoreParams, dbStore),
-		DbStore:    dbStore,
-		Validators: params.Validators,
-	}
-	return localStore, nil
-}
-
-// isValid returns true if chunk passes any of the LocalStore Validators.
-// isValid also returns true if LocalStore has no Validators.
-func (ls *LocalStore) isValid(chunk Chunk) bool {
-	// by default chunks are valid. if we have 0 validators, then all chunks are valid.
-	valid := true
-
-	// ls.Validators contains a list of one validator per chunk type.
-	// if one validator succeeds, then the chunk is valid
-	for _, v := range ls.Validators {
-		if valid = v.Validate(chunk); valid {
-			break
-		}
-	}
-	return valid
-}
-
-// Put is responsible for doing validation and storage of the chunk
-// by using configured ChunkValidators, MemStore and LDBStore.
-// If the chunk is not valid, its GetErrored function will
-// return ErrChunkInvalid.
-// This method will check if the chunk is already in the MemStore
-// and it will return it if it is. If there is an error from
-// the MemStore.Get, it will be returned by calling GetErrored
-// on the chunk.
-// This method is responsible for closing Chunk.ReqC channel
-// when the chunk is stored in memstore.
-// After the LDBStore.Put, it is ensured that the MemStore
-// contains the chunk with the same data, but nil ReqC channel.
-func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error {
-	if !ls.isValid(chunk) {
-		return ErrChunkInvalid
-	}
-
-	log.Trace("localstore.put", "key", chunk.Address())
-	ls.mu.Lock()
-	defer ls.mu.Unlock()
-
-	_, err := ls.memStore.Get(ctx, chunk.Address())
-	if err == nil {
-		return nil
-	}
-	if err != nil && err != ErrChunkNotFound {
-		return err
-	}
-	ls.memStore.Put(ctx, chunk)
-	err = ls.DbStore.Put(ctx, chunk)
-	return err
-}
-
-// Has queries the underlying DbStore if a chunk with the given address
-// is being stored there.
-// Returns true if it is stored, false if not
-func (ls *LocalStore) Has(ctx context.Context, addr Address) bool {
-	return ls.DbStore.Has(ctx, addr)
-}
-
-// Get(chunk *Chunk) looks up a chunk in the local stores
-// This method is blocking until the chunk is retrieved
-// so additional timeout may be needed to wrap this call if
-// ChunkStores are remote and can have long latency
-func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error) {
-	ls.mu.Lock()
-	defer ls.mu.Unlock()
-
-	return ls.get(ctx, addr)
-}
-
-func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err error) {
-	chunk, err = ls.memStore.Get(ctx, addr)
-
-	if err != nil && err != ErrChunkNotFound {
-		metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
-		return nil, err
-	}
-
-	if err == nil {
-		metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1)
-		go ls.DbStore.MarkAccessed(addr)
-		return chunk, nil
-	}
-
-	metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1)
-	chunk, err = ls.DbStore.Get(ctx, addr)
-	if err != nil {
-		metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
-		return nil, err
-	}
-
-	ls.memStore.Put(ctx, chunk)
-	return chunk, nil
-}
-
-func (ls *LocalStore) FetchFunc(ctx context.Context, addr Address) func(context.Context) error {
-	ls.mu.Lock()
-	defer ls.mu.Unlock()
-
-	_, err := ls.get(ctx, addr)
-	if err == nil {
-		return nil
-	}
-	return func(context.Context) error {
-		return err
-	}
-}
-
-func (ls *LocalStore) BinIndex(po uint8) uint64 {
-	return ls.DbStore.BinIndex(po)
-}
-
-func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
-	return ls.DbStore.SyncIterator(from, to, po, f)
-}
-
-// Close the local store
-func (ls *LocalStore) Close() {
-	ls.DbStore.Close()
-}
-
-// Migrate checks the datastore schema vs the runtime schema and runs
-// migrations if they don't match
-func (ls *LocalStore) Migrate() error {
-	actualDbSchema, err := ls.DbStore.GetSchema()
-	if err != nil {
-		log.Error(err.Error())
-		return err
-	}
-
-	if actualDbSchema == CurrentDbSchema {
-		return nil
-	}
-
-	log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema)
-
-	if actualDbSchema == DbSchemaNone {
-		ls.migrateFromNoneToPurity()
-		actualDbSchema = DbSchemaPurity
-	}
-
-	if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
-		return err
-	}
-
-	if actualDbSchema == DbSchemaPurity {
-		if err := ls.migrateFromPurityToHalloween(); err != nil {
-			return err
-		}
-		actualDbSchema = DbSchemaHalloween
-	}
-
-	if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (ls *LocalStore) migrateFromNoneToPurity() {
-	// delete chunks that are not valid, i.e. chunks that do not pass
-	// any of the ls.Validators
-	ls.DbStore.Cleanup(func(c Chunk) bool {
-		return !ls.isValid(c)
-	})
-}
-
-func (ls *LocalStore) migrateFromPurityToHalloween() error {
-	return ls.DbStore.CleanGCIndex()
-}
diff --git a/swarm/storage/localstore/export.go b/swarm/storage/localstore/export.go
new file mode 100644
index 000000000..bbea1d877
--- /dev/null
+++ b/swarm/storage/localstore/export.go
@@ -0,0 +1,204 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package localstore
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/shed"
+)
+
+const (
+	// filename in tar archive that holds the information
+	// about exported data format version
+	exportVersionFilename = ".swarm-export-version"
+	// legacy version for previous LDBStore
+	legacyExportVersion = "1"
+	// current export format version
+	currentExportVersion = "2"
+)
+
+// Export writes a tar archive of all chunks in the retrieval
+// data index to the given writer. It returns the number of
+// chunks exported.
+func (db *DB) Export(w io.Writer) (count int64, err error) {
+	tw := tar.NewWriter(w)
+	defer tw.Close()
+
+	if err := tw.WriteHeader(&tar.Header{
+		Name: exportVersionFilename,
+		Mode: 0644,
+		Size: int64(len(currentExportVersion)),
+	}); err != nil {
+		return 0, err
+	}
+	if _, err := tw.Write([]byte(currentExportVersion)); err != nil {
+		return 0, err
+	}
+
+	err = db.retrievalDataIndex.Iterate(func(item shed.Item) (stop bool, err error) {
+		hdr := &tar.Header{
+			Name: hex.EncodeToString(item.Address),
+			Mode: 0644,
+			Size: int64(len(item.Data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return false, err
+		}
+		if _, err := tw.Write(item.Data); err != nil {
+			return false, err
+		}
+		count++
+		return false, nil
+	}, nil)
+
+	return count, err
+}
+
+// Import reads a tar archive from the reader and stores its
+// chunks in the database. It returns the number of chunks
+// imported.
+func (db *DB) Import(r io.Reader, legacy bool) (count int64, err error) {
+	tr := tar.NewReader(r)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	errC := make(chan error)
+	doneC := make(chan struct{})
+	tokenPool := make(chan struct{}, 100)
+	var wg sync.WaitGroup
+	go func() {
+		var (
+			firstFile = true
+			// if exportVersionFilename file is not present
+			// assume legacy version
+			version = legacyExportVersion
+		)
+		for {
+			hdr, err := tr.Next()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				select {
+				case errC <- err:
+				case <-ctx.Done():
+				}
+			}
+			if firstFile {
+				firstFile = false
+				if hdr.Name == exportVersionFilename {
+					data, err := ioutil.ReadAll(tr)
+					if err != nil {
+						select {
+						case errC <- err:
+						case <-ctx.Done():
+						}
+					}
+					version = string(data)
+					continue
+				}
+			}
+
+			if len(hdr.Name) != 64 {
+				log.Warn("ignoring non-chunk file", "name", hdr.Name)
+				continue
+			}
+
+			keybytes, err := hex.DecodeString(hdr.Name)
+			if err != nil {
+				log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
+				continue
+			}
+
+			data, err := ioutil.ReadAll(tr)
+			if err != nil {
+				select {
+				case errC <- err:
+				case <-ctx.Done():
+				}
+			}
+			key := chunk.Address(keybytes)
+
+			var ch chunk.Chunk
+			switch version {
+			case legacyExportVersion:
+				// LDBStore Export exported chunk data prefixed with the chunk key.
+				// That is not necessary, as the key is in the chunk filename,
+				// but backward compatibility needs to be preserved.
+				ch = chunk.NewChunk(key, data[32:])
+			case currentExportVersion:
+				ch = chunk.NewChunk(key, data)
+			default:
+				select {
+				case errC <- fmt.Errorf("unsupported export data version %q", version):
+				case <-ctx.Done():
+				}
+			}
+			tokenPool <- struct{}{}
+			wg.Add(1)
+
+			go func() {
+				_, err := db.Put(ctx, chunk.ModePutUpload, ch)
+				select {
+				case errC <- err:
+				case <-ctx.Done():
+				}
+				wg.Done()
+				<-tokenPool
+			}()
+
+			count++
+		}
+		wg.Wait()
+		close(doneC)
+	}()
+
+	// wait for all chunks to be stored
+	for {
+		select {
+		case err := <-errC:
+			if err != nil {
+				return count, err
+			}
+		case <-ctx.Done():
+			return count, ctx.Err()
+		default:
+			select {
+			case <-doneC:
+				return count, nil
+			default:
+			}
+		}
+	}
+}
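Reviewer note, not part of the patch: a minimal sketch of how the new Export and Import methods are intended to be driven end to end. The paths and base key are placeholders, DB.Close is assumed from the localstore API, and the validated round trip follows in export_test.go below.

// Sketch only: export all chunks from one localstore and import them into another.
package sketch

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func roundTrip(srcPath, dstPath string, baseKey []byte) error {
	src, err := localstore.New(srcPath, baseKey, nil) // nil Options: defaults apply
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := localstore.New(dstPath, baseKey, nil)
	if err != nil {
		return err
	}
	defer dst.Close()

	var buf bytes.Buffer
	exported, err := src.Export(&buf) // writes the version file and one tar entry per chunk
	if err != nil {
		return err
	}

	// legacy=false: the archive was produced by the new Export above
	imported, err := dst.Import(&buf, false)
	if err != nil {
		return err
	}
	fmt.Printf("exported %d, imported %d chunks\n", exported, imported)
	return nil
}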
diff --git a/swarm/storage/localstore/export_test.go b/swarm/storage/localstore/export_test.go
new file mode 100644
index 000000000..d7f848f80
--- /dev/null
+++ b/swarm/storage/localstore/export_test.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package localstore
+
+import (
+	"bytes"
+	"context"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+)
+
+// TestExportImport constructs two databases, one to put and export
+// chunks and another one to import and validate that all chunks are
+// imported.
+func TestExportImport(t *testing.T) {
+	db1, cleanup1 := newTestDB(t, nil)
+	defer cleanup1()
+
+	var chunkCount = 100
+
+	chunks := make(map[string][]byte, chunkCount)
+	for i := 0; i < chunkCount; i++ {
+		ch := generateTestRandomChunk()
+
+		_, err := db1.Put(context.Background(), chunk.ModePutUpload, ch)
+		if err != nil {
+			t.Fatal(err)
+		}
+		chunks[string(ch.Address())] = ch.Data()
+	}
+
+	var buf bytes.Buffer
+
+	c, err := db1.Export(&buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	wantChunksCount := int64(len(chunks))
+	if c != wantChunksCount {
+		t.Errorf("got export count %v, want %v", c, wantChunksCount)
+	}
+
+	db2, cleanup2 := newTestDB(t, nil)
+	defer cleanup2()
+
+	c, err = db2.Import(&buf, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c != wantChunksCount {
+		t.Errorf("got import count %v, want %v", c, wantChunksCount)
+	}
+
+	for a, want := range chunks {
+		addr := chunk.Address([]byte(a))
+		ch, err := db2.Get(context.Background(), chunk.ModeGetRequest, addr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := ch.Data()
+		if !bytes.Equal(got, want) {
+			t.Fatalf("chunk %s: got data %x, want %x", addr.Hex(), got, want)
+		}
+	}
+}
diff --git a/swarm/storage/localstore/gc_test.go b/swarm/storage/localstore/gc_test.go
index 081e0af80..4a6e0a5f4 100644
--- a/swarm/storage/localstore/gc_test.go
+++ b/swarm/storage/localstore/gc_test.go
@@ -17,6 +17,7 @@
 package localstore
 
 import (
+	"context"
 	"io/ioutil"
 	"math/rand"
 	"os"
@@ -63,26 +64,23 @@ func testDB_collectGarbageWorker(t *testing.T) {
 	})()
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-	syncer := db.NewSetter(ModeSetSync)
-
 	addrs := make([]chunk.Address, 0)
 
 	// upload random chunks
 	for i := 0; i < chunkCount; i++ {
-		chunk := generateTestRandomChunk()
+		ch := generateTestRandomChunk()
 
-		err := uploader.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		err = syncer.Set(chunk.Address())
+		err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		addrs = append(addrs, chunk.Address())
+		addrs = append(addrs, ch.Address())
 	}
 
 	gcTarget := db.gcTarget()
@@ -110,7 +108,7 @@ func testDB_collectGarbageWorker(t *testing.T) {
 
 	// the first synced chunk should be removed
 	t.Run("get the first synced chunk", func(t *testing.T) {
-		_, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
 		if err != chunk.ErrChunkNotFound {
 			t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
 		}
@@ -118,7 +116,7 @@ func testDB_collectGarbageWorker(t *testing.T) {
 
 	// last synced chunk should not be removed
 	t.Run("get most recent synced chunk", func(t *testing.T) {
-		_, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -134,9 +132,6 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 	})
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-	syncer := db.NewSetter(ModeSetSync)
-
 	testHookCollectGarbageChan := make(chan uint64)
 	defer setTestHookCollectGarbage(func(collectedCount uint64) {
 		testHookCollectGarbageChan <- collectedCount
@@ -146,19 +141,19 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 
 	// upload random chunks just up to the capacity
 	for i := 0; i < int(db.capacity)-1; i++ {
-		chunk := generateTestRandomChunk()
+		ch := generateTestRandomChunk()
 
-		err := uploader.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		err = syncer.Set(chunk.Address())
+		err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		addrs = append(addrs, chunk.Address())
+		addrs = append(addrs, ch.Address())
 	}
 
 	// set update gc test hook to signal when
@@ -172,7 +167,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 	// request the first synced chunk
 	// to prioritize it in the gc index
 	// not to be collected
-	_, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+	_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -191,11 +186,11 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 	// upload and sync another chunk to trigger
 	// garbage collection
 	ch := generateTestRandomChunk()
-	err = uploader.Put(ch)
+	_, err = db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = syncer.Set(ch.Address())
+	err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -235,7 +230,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 
 	// requested chunk should not be removed
 	t.Run("get requested chunk", func(t *testing.T) {
-		_, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -243,7 +238,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 
 	// the second synced chunk should be removed
 	t.Run("get gc-ed chunk", func(t *testing.T) {
-		_, err := db.NewGetter(ModeGetRequest).Get(addrs[1])
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[1])
 		if err != chunk.ErrChunkNotFound {
 			t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
 		}
@@ -251,7 +246,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 
 	// last synced chunk should not be removed
 	t.Run("get most recent synced chunk", func(t *testing.T) {
-		_, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -275,20 +270,17 @@ func TestDB_gcSize(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	uploader := db.NewPutter(ModePutUpload)
-	syncer := db.NewSetter(ModeSetSync)
-
 	count := 100
 
 	for i := 0; i < count; i++ {
-		chunk := generateTestRandomChunk()
+		ch := generateTestRandomChunk()
 
-		err := uploader.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		err = syncer.Set(chunk.Address())
+		err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
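Reviewer note, not part of the patch: the test changes above (and in the files that follow) apply one mechanical pattern. The per-mode accessor objects (NewPutter, NewSetter, NewGetter, NewHasser) are replaced by direct DB methods that take a context and a mode constant from the chunk package. A sketch of the new call shapes, with the removed calls shown in comments; db, ch and addr are placeholders.

// Sketch only: the chunk.Store-style API used by the updated tests.
package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func exercise(db *localstore.DB, ch chunk.Chunk, addr chunk.Address) error {
	ctx := context.Background()

	// was: db.NewPutter(ModePutUpload).Put(ch)
	if _, err := db.Put(ctx, chunk.ModePutUpload, ch); err != nil {
		return err
	}
	// was: db.NewSetter(ModeSetSync).Set(ch.Address())
	if err := db.Set(ctx, chunk.ModeSetSync, ch.Address()); err != nil {
		return err
	}
	// was: db.NewGetter(ModeGetRequest).Get(addr)
	if _, err := db.Get(ctx, chunk.ModeGetRequest, addr); err != nil {
		return err
	}
	return nil
}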
diff --git a/swarm/storage/localstore/index_test.go b/swarm/storage/localstore/index_test.go
index cf19e4f6c..0f23aa10a 100644
--- a/swarm/storage/localstore/index_test.go
+++ b/swarm/storage/localstore/index_test.go
@@ -18,6 +18,7 @@ package localstore
 
 import (
 	"bytes"
+	"context"
 	"math/rand"
 	"testing"
 
@@ -35,29 +36,22 @@ func TestDB_pullIndex(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	chunkCount := 50
 
 	chunks := make([]testIndexChunk, chunkCount)
 
 	// upload random chunks
 	for i := 0; i < chunkCount; i++ {
-		chunk := generateTestRandomChunk()
+		ch := generateTestRandomChunk()
 
-		err := uploader.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
 		chunks[i] = testIndexChunk{
-			Chunk: chunk,
-			// this timestamp is not the same as in
-			// the index, but given that uploads
-			// are sequential and that only ordering
-			// of events matter, this information is
-			// sufficient
-			storeTimestamp: now(),
+			Chunk: ch,
+			binID: uint64(i),
 		}
 	}
 
@@ -70,10 +64,10 @@ func TestDB_pullIndex(t *testing.T) {
 		if poi > poj {
 			return false
 		}
-		if chunks[i].storeTimestamp < chunks[j].storeTimestamp {
+		if chunks[i].binID < chunks[j].binID {
 			return true
 		}
-		if chunks[i].storeTimestamp > chunks[j].storeTimestamp {
+		if chunks[i].binID > chunks[j].binID {
 			return false
 		}
 		return bytes.Compare(chunks[i].Address(), chunks[j].Address()) == -1
@@ -87,23 +81,21 @@ func TestDB_gcIndex(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	chunkCount := 50
 
 	chunks := make([]testIndexChunk, chunkCount)
 
 	// upload random chunks
 	for i := 0; i < chunkCount; i++ {
-		chunk := generateTestRandomChunk()
+		ch := generateTestRandomChunk()
 
-		err := uploader.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
 		chunks[i] = testIndexChunk{
-			Chunk: chunk,
+			Chunk: ch,
 		}
 	}
 
@@ -123,9 +115,9 @@ func TestDB_gcIndex(t *testing.T) {
 	})()
 
 	t.Run("request unsynced", func(t *testing.T) {
-		chunk := chunks[1]
+		ch := chunks[1]
 
-		_, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -140,9 +132,9 @@ func TestDB_gcIndex(t *testing.T) {
 	})
 
 	t.Run("sync one chunk", func(t *testing.T) {
-		chunk := chunks[0]
+		ch := chunks[0]
 
-		err := db.NewSetter(ModeSetSync).Set(chunk.Address())
+		err := db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -154,10 +146,8 @@ func TestDB_gcIndex(t *testing.T) {
 	})
 
 	t.Run("sync all chunks", func(t *testing.T) {
-		setter := db.NewSetter(ModeSetSync)
-
 		for i := range chunks {
-			err := setter.Set(chunks[i].Address())
+			err := db.Set(context.Background(), chunk.ModeSetSync, chunks[i].Address())
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -171,7 +161,7 @@ func TestDB_gcIndex(t *testing.T) {
 	t.Run("request one chunk", func(t *testing.T) {
 		i := 6
 
-		_, err := db.NewGetter(ModeGetRequest).Get(chunks[i].Address())
+		_, err := db.Get(context.Background(), chunk.ModeGetRequest, chunks[i].Address())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -189,14 +179,13 @@ func TestDB_gcIndex(t *testing.T) {
 	})
 
 	t.Run("random chunk request", func(t *testing.T) {
-		requester := db.NewGetter(ModeGetRequest)
 
 		rand.Shuffle(len(chunks), func(i, j int) {
 			chunks[i], chunks[j] = chunks[j], chunks[i]
 		})
 
-		for _, chunk := range chunks {
-			_, err := requester.Get(chunk.Address())
+		for _, ch := range chunks {
+			_, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -212,7 +201,7 @@ func TestDB_gcIndex(t *testing.T) {
 	t.Run("remove one chunk", func(t *testing.T) {
 		i := 3
 
-		err := db.NewSetter(modeSetRemove).Set(chunks[i].Address())
+		err := db.Set(context.Background(), chunk.ModeSetRemove, chunks[i].Address())
 		if err != nil {
 			t.Fatal(err)
 		}
diff --git a/swarm/storage/localstore/localstore.go b/swarm/storage/localstore/localstore.go
index 98d4c7881..56a6d10e6 100644
--- a/swarm/storage/localstore/localstore.go
+++ b/swarm/storage/localstore/localstore.go
@@ -28,6 +28,9 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/storage/mock"
 )
 
+// DB implements chunk.Store.
+var _ chunk.Store = &DB{}
+
 var (
 	// ErrInvalidMode is returned when an unknown Mode
 	// is provided to the function.
@@ -69,6 +72,10 @@ type DB struct {
 	pullTriggers   map[uint8][]chan struct{}
 	pullTriggersMu sync.RWMutex
 
+	// binIDs stores the latest chunk serial ID for every
+	// proximity order bin
+	binIDs shed.Uint64Vector
+
 	// garbage collection index
 	gcIndex shed.Index
 
@@ -124,7 +131,10 @@ type Options struct {
 // One goroutine for writing batches is created.
 func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 	if o == nil {
-		o = new(Options)
+		// default options
+		o = &Options{
+			Capacity: 5000000,
+		}
 	}
 	db = &DB{
 		capacity: o.Capacity,
@@ -148,11 +158,23 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 	if err != nil {
 		return nil, err
 	}
+
 	// Identify current storage schema by arbitrary name.
 	db.schemaName, err = db.shed.NewStringField("schema-name")
 	if err != nil {
 		return nil, err
 	}
+	schemaName, err := db.schemaName.Get()
+	if err != nil {
+		return nil, err
+	}
+	if schemaName == "" {
+		// initial new localstore run
+		err := db.schemaName.Put(DbSchemaSanctuary)
+		if err != nil {
+			return nil, err
+		}
+	}
 	// Persist gc size.
 	db.gcSize, err = db.shed.NewUint64Field("gc-size")
 	if err != nil {
@@ -165,8 +187,9 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 	)
 	if o.MockStore != nil {
 		encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
-			b := make([]byte, 8)
-			binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+			b := make([]byte, 16)
+			binary.BigEndian.PutUint64(b[:8], fields.BinID)
+			binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
 			err = o.MockStore.Put(fields.Address, fields.Data)
 			if err != nil {
 				return nil, err
@@ -174,25 +197,28 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 			return b, nil
 		}
 		decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
-			e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+			e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+			e.BinID = binary.BigEndian.Uint64(value[:8])
 			e.Data, err = o.MockStore.Get(keyItem.Address)
 			return e, err
 		}
 	} else {
 		encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
-			b := make([]byte, 8)
-			binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+			b := make([]byte, 16)
+			binary.BigEndian.PutUint64(b[:8], fields.BinID)
+			binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
 			value = append(b, fields.Data...)
 			return value, nil
 		}
 		decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
-			e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
-			e.Data = value[8:]
+			e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+			e.BinID = binary.BigEndian.Uint64(value[:8])
+			e.Data = value[16:]
 			return e, nil
 		}
 	}
-	// Index storing actual chunk address, data and store timestamp.
-	db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+	// Index storing actual chunk address, data and bin id.
+	db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
 		EncodeKey: func(fields shed.Item) (key []byte, err error) {
 			return fields.Address, nil
 		},
@@ -230,33 +256,37 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 		return nil, err
 	}
 	// pull index allows history and live syncing per po bin
-	db.pullIndex, err = db.shed.NewIndex("PO|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+	db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash", shed.IndexFuncs{
 		EncodeKey: func(fields shed.Item) (key []byte, err error) {
 			key = make([]byte, 41)
 			key[0] = db.po(fields.Address)
-			binary.BigEndian.PutUint64(key[1:9], uint64(fields.StoreTimestamp))
-			copy(key[9:], fields.Address[:])
+			binary.BigEndian.PutUint64(key[1:9], fields.BinID)
 			return key, nil
 		},
 		DecodeKey: func(key []byte) (e shed.Item, err error) {
-			e.Address = key[9:]
-			e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[1:9]))
+			e.BinID = binary.BigEndian.Uint64(key[1:9])
 			return e, nil
 		},
 		EncodeValue: func(fields shed.Item) (value []byte, err error) {
-			return nil, nil
+			return fields.Address, nil
 		},
 		DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+			e.Address = value
 			return e, nil
 		},
 	})
 	if err != nil {
 		return nil, err
 	}
+	// create a vector for bin IDs
+	db.binIDs, err = db.shed.NewUint64Vector("bin-ids")
+	if err != nil {
+		return nil, err
+	}
 	// create pull syncing triggers used by the SubscribePull function
 	db.pullTriggers = make(map[uint8][]chan struct{})
 	// push index contains as yet unsynced chunks
-	db.pushIndex, err = db.shed.NewIndex("StoredTimestamp|Hash->nil", shed.IndexFuncs{
+	db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{
 		EncodeKey: func(fields shed.Item) (key []byte, err error) {
 			key = make([]byte, 40)
 			binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp))
@@ -281,17 +311,17 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
 	// create push syncing triggers used by the SubscribePush function
 	db.pushTriggers = make([]chan struct{}, 0)
 	// gc index for removable chunks ordered by ascending last access time
-	db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+	db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
 		EncodeKey: func(fields shed.Item) (key []byte, err error) {
 			b := make([]byte, 16, 16+len(fields.Address))
 			binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
-			binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+			binary.BigEndian.PutUint64(b[8:16], fields.BinID)
 			key = append(b, fields.Address...)
 			return key, nil
 		},
 		DecodeKey: func(key []byte) (e shed.Item, err error) {
 			e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
-			e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+			e.BinID = binary.BigEndian.Uint64(key[8:16])
 			e.Address = key[16:]
 			return e, nil
 		},
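Reviewer note, not part of the patch: the hunks above change three on-disk encodings at once, all big-endian. A compact sketch of the resulting layouts, mirroring the encode/decode functions introduced here:

// Sketch only: decode a retrieval data index value after this change.
// Value layout:  BinID (8 bytes) | StoreTimestamp (8 bytes) | chunk data
// pullIndex:     key = PO (1 byte) | BinID (8 bytes), value = chunk address
// gcIndex:       key = AccessTimestamp (8) | BinID (8) | address, value = nil
package sketch

import "encoding/binary"

type retrievalValue struct {
	BinID          uint64
	StoreTimestamp int64
	Data           []byte
}

func decodeRetrievalValue(value []byte) retrievalValue {
	return retrievalValue{
		BinID:          binary.BigEndian.Uint64(value[:8]),
		StoreTimestamp: int64(binary.BigEndian.Uint64(value[8:16])),
		Data:           value[16:],
	}
}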
diff --git a/swarm/storage/localstore/localstore_test.go b/swarm/storage/localstore/localstore_test.go
index 42e762587..6dbc4b7ad 100644
--- a/swarm/storage/localstore/localstore_test.go
+++ b/swarm/storage/localstore/localstore_test.go
@@ -18,6 +18,7 @@ package localstore
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io/ioutil"
 	"math/rand"
@@ -59,23 +60,23 @@ func TestDB(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	got, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+	got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if !bytes.Equal(got.Address(), chunk.Address()) {
-		t.Errorf("got address %x, want %x", got.Address(), chunk.Address())
+	if !bytes.Equal(got.Address(), ch.Address()) {
+		t.Errorf("got address %x, want %x", got.Address(), ch.Address())
 	}
-	if !bytes.Equal(got.Data(), chunk.Data()) {
-		t.Errorf("got data %x, want %x", got.Data(), chunk.Data())
+	if !bytes.Equal(got.Data(), ch.Data()) {
+		t.Errorf("got data %x, want %x", got.Data(), ch.Data())
 	}
 }
 
@@ -113,19 +114,17 @@ func TestDB_updateGCSem(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	getter := db.NewGetter(ModeGetRequest)
-
 	// get more chunks than maxParallelUpdateGC
 	// in a time shorter than updateGCSleep
 	for i := 0; i < 5; i++ {
-		_, err = getter.Get(chunk.Address())
+		_, err = db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -237,71 +236,71 @@ func newRetrieveIndexesTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTim
 
 // newRetrieveIndexesTestWithAccess returns a test function that validates if the right
 // chunk values are in the retrieval indexes when access time must be stored.
-func newRetrieveIndexesTestWithAccess(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newRetrieveIndexesTestWithAccess(db *DB, ch chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
 	return func(t *testing.T) {
-		item, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
+		item, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
 		if err != nil {
 			t.Fatal(err)
 		}
-		validateItem(t, item, chunk.Address(), chunk.Data(), storeTimestamp, 0)
+		validateItem(t, item, ch.Address(), ch.Data(), storeTimestamp, 0)
 
 		if accessTimestamp > 0 {
-			item, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
+			item, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
 			if err != nil {
 				t.Fatal(err)
 			}
-			validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
+			validateItem(t, item, ch.Address(), nil, 0, accessTimestamp)
 		}
 	}
 }
 
 // newPullIndexTest returns a test function that validates if the right
 // chunk values are in the pull index.
-func newPullIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPullIndexTest(db *DB, ch chunk.Chunk, binID uint64, wantError error) func(t *testing.T) {
 	return func(t *testing.T) {
 		item, err := db.pullIndex.Get(shed.Item{
-			Address:        chunk.Address(),
-			StoreTimestamp: storeTimestamp,
+			Address: ch.Address(),
+			BinID:   binID,
 		})
 		if err != wantError {
 			t.Errorf("got error %v, want %v", err, wantError)
 		}
 		if err == nil {
-			validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+			validateItem(t, item, ch.Address(), nil, 0, 0)
 		}
 	}
 }
 
 // newPushIndexTest returns a test function that validates if the right
 // chunk values are in the push index.
-func newPushIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPushIndexTest(db *DB, ch chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
 	return func(t *testing.T) {
 		item, err := db.pushIndex.Get(shed.Item{
-			Address:        chunk.Address(),
+			Address:        ch.Address(),
 			StoreTimestamp: storeTimestamp,
 		})
 		if err != wantError {
 			t.Errorf("got error %v, want %v", err, wantError)
 		}
 		if err == nil {
-			validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+			validateItem(t, item, ch.Address(), nil, storeTimestamp, 0)
 		}
 	}
 }
 
 // newGCIndexTest returns a test function that validates if the right
 // chunk values are in the gc index.
-func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64, binID uint64) func(t *testing.T) {
 	return func(t *testing.T) {
 		item, err := db.gcIndex.Get(shed.Item{
 			Address:         chunk.Address(),
-			StoreTimestamp:  storeTimestamp,
+			BinID:           binID,
 			AccessTimestamp: accessTimestamp,
 		})
 		if err != nil {
 			t.Fatal(err)
 		}
-		validateItem(t, item, chunk.Address(), nil, storeTimestamp, accessTimestamp)
+		validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
 	}
 }
 
@@ -349,7 +348,7 @@ func newIndexGCSizeTest(db *DB) func(t *testing.T) {
 // in database. It is used for index values validations.
 type testIndexChunk struct {
 	chunk.Chunk
-	storeTimestamp int64
+	binID uint64
 }
 
 // testItemsOrder tests the order of chunks in the index. If sortFunc is not nil,
diff --git a/swarm/storage/localstore/mode_get.go b/swarm/storage/localstore/mode_get.go
index a6353e141..0df0e9b7d 100644
--- a/swarm/storage/localstore/mode_get.go
+++ b/swarm/storage/localstore/mode_get.go
@@ -17,45 +17,21 @@
 package localstore
 
 import (
+	"context"
+
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/shed"
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
-// ModeGet enumerates different Getter modes.
-type ModeGet int
-
-// Getter modes.
-const (
-	// ModeGetRequest: when accessed for retrieval
-	ModeGetRequest ModeGet = iota
-	// ModeGetSync: when accessed for syncing or proof of custody request
-	ModeGetSync
-)
-
-// Getter provides Get method to retrieve Chunks
-// from database.
-type Getter struct {
-	db   *DB
-	mode ModeGet
-}
-
-// NewGetter returns a new Getter on database
-// with a specific Mode.
-func (db *DB) NewGetter(mode ModeGet) *Getter {
-	return &Getter{
-		mode: mode,
-		db:   db,
-	}
-}
-
 // Get returns a chunk from the database. If the chunk is
 // not found chunk.ErrChunkNotFound will be returned.
 // All required indexes will be updated as required by the
-// Getter Mode.
-func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
-	out, err := g.db.get(g.mode, addr)
+// Getter Mode. Get is required to implement chunk.Store
+// interface.
+func (db *DB) Get(_ context.Context, mode chunk.ModeGet, addr chunk.Address) (ch chunk.Chunk, err error) {
+	out, err := db.get(mode, addr)
 	if err != nil {
 		if err == leveldb.ErrNotFound {
 			return nil, chunk.ErrChunkNotFound
@@ -67,7 +43,7 @@ func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
 
 // get returns Item from the retrieval index
 // and updates other indexes.
-func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
+func (db *DB) get(mode chunk.ModeGet, addr chunk.Address) (out shed.Item, err error) {
 	item := addressToItem(addr)
 
 	out, err = db.retrievalDataIndex.Get(item)
@@ -76,7 +52,7 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
 	}
 	switch mode {
 	// update the access timestamp and gc index
-	case ModeGetRequest:
+	case chunk.ModeGetRequest:
 		if db.updateGCSem != nil {
 			// wait before creating new goroutines
 			// if updateGCSem buffer is full
@@ -101,7 +77,8 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
 		}()
 
 	// no updates to indexes
-	case ModeGetSync:
+	case chunk.ModeGetSync:
+	case chunk.ModeGetLookup:
 	default:
 		return out, ErrInvalidMode
 	}
diff --git a/swarm/storage/localstore/mode_get_test.go b/swarm/storage/localstore/mode_get_test.go
index 28a70ee0c..217fa5d2d 100644
--- a/swarm/storage/localstore/mode_get_test.go
+++ b/swarm/storage/localstore/mode_get_test.go
@@ -18,8 +18,11 @@ package localstore
 
 import (
 	"bytes"
+	"context"
 	"testing"
 	"time"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 )
 
 // TestModeGetRequest validates ModeGetRequest index values on the provided DB.
@@ -32,15 +35,13 @@ func TestModeGetRequest(t *testing.T) {
 		return uploadTimestamp
 	})()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	requester := db.NewGetter(ModeGetRequest)
-
 	// set update gc test hook to signal when
 	// update gc goroutine is done by sending to
 	// testHookUpdateGCChan channel, which is
@@ -52,22 +53,22 @@ func TestModeGetRequest(t *testing.T) {
 	})()
 
 	t.Run("get unsynced", func(t *testing.T) {
-		got, err := requester.Get(chunk.Address())
+		got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
 		// wait for update gc goroutine to be done
 		<-testHookUpdateGCChan
 
-		if !bytes.Equal(got.Address(), chunk.Address()) {
-			t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+		if !bytes.Equal(got.Address(), ch.Address()) {
+			t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
 		}
 
-		if !bytes.Equal(got.Data(), chunk.Data()) {
-			t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+		if !bytes.Equal(got.Data(), ch.Data()) {
+			t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
 		}
 
-		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
+		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
 
 		t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
 
@@ -75,30 +76,30 @@ func TestModeGetRequest(t *testing.T) {
 	})
 
 	// set chunk to synced state
-	err = db.NewSetter(ModeSetSync).Set(chunk.Address())
+	err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	t.Run("first get", func(t *testing.T) {
-		got, err := requester.Get(chunk.Address())
+		got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
 		// wait for update gc goroutine to be done
 		<-testHookUpdateGCChan
 
-		if !bytes.Equal(got.Address(), chunk.Address()) {
-			t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+		if !bytes.Equal(got.Address(), ch.Address()) {
+			t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
 		}
 
-		if !bytes.Equal(got.Data(), chunk.Data()) {
-			t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+		if !bytes.Equal(got.Data(), ch.Data()) {
+			t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
 		}
 
-		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, uploadTimestamp))
+		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, uploadTimestamp))
 
-		t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, uploadTimestamp))
+		t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1))
 
 		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -111,24 +112,24 @@ func TestModeGetRequest(t *testing.T) {
 			return accessTimestamp
 		})()
 
-		got, err := requester.Get(chunk.Address())
+		got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
 		// wait for update gc goroutine to be done
 		<-testHookUpdateGCChan
 
-		if !bytes.Equal(got.Address(), chunk.Address()) {
-			t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+		if !bytes.Equal(got.Address(), ch.Address()) {
+			t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
 		}
 
-		if !bytes.Equal(got.Data(), chunk.Data()) {
-			t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+		if !bytes.Equal(got.Data(), ch.Data()) {
+			t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
 		}
 
-		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, accessTimestamp))
+		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, accessTimestamp))
 
-		t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, accessTimestamp))
+		t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, accessTimestamp, 1))
 
 		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -146,27 +147,27 @@ func TestModeGetSync(t *testing.T) {
 		return uploadTimestamp
 	})()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	got, err := db.NewGetter(ModeGetSync).Get(chunk.Address())
+	got, err := db.Get(context.Background(), chunk.ModeGetSync, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if !bytes.Equal(got.Address(), chunk.Address()) {
-		t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+	if !bytes.Equal(got.Address(), ch.Address()) {
+		t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
 	}
 
-	if !bytes.Equal(got.Data(), chunk.Data()) {
-		t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+	if !bytes.Equal(got.Data(), ch.Data()) {
+		t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
 	}
 
-	t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
+	t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
 
 	t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
 
diff --git a/swarm/storage/localstore/mode_has.go b/swarm/storage/localstore/mode_has.go
index 90feaceef..fea8a50bf 100644
--- a/swarm/storage/localstore/mode_has.go
+++ b/swarm/storage/localstore/mode_has.go
@@ -17,23 +17,12 @@
 package localstore
 
 import (
+	"context"
+
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 )
 
-// Hasser provides Has method to retrieve Chunks
-// from database.
-type Hasser struct {
-	db *DB
-}
-
-// NewHasser returns a new Hasser on database.
-func (db *DB) NewHasser() *Hasser {
-	return &Hasser{
-		db: db,
-	}
-}
-
 // Has returns true if the chunk is stored in database.
-func (h *Hasser) Has(addr chunk.Address) (bool, error) {
-	return h.db.retrievalDataIndex.Has(addressToItem(addr))
+func (db *DB) Has(_ context.Context, addr chunk.Address) (bool, error) {
+	return db.retrievalDataIndex.Has(addressToItem(addr))
 }
diff --git a/swarm/storage/localstore/mode_has_test.go b/swarm/storage/localstore/mode_has_test.go
index 332616ca2..043b21a2b 100644
--- a/swarm/storage/localstore/mode_has_test.go
+++ b/swarm/storage/localstore/mode_has_test.go
@@ -17,7 +17,10 @@
 package localstore
 
 import (
+	"context"
 	"testing"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 )
 
 // TestHas validates that Has is returning true for
@@ -26,16 +29,14 @@ func TestHas(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	hasser := db.NewHasser()
-
-	has, err := hasser.Has(chunk.Address())
+	has, err := db.Has(context.Background(), ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -45,7 +46,7 @@ func TestHas(t *testing.T) {
 
 	missingChunk := generateTestRandomChunk()
 
-	has, err = hasser.Has(missingChunk.Address())
+	has, err = db.Has(context.Background(), missingChunk.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/swarm/storage/localstore/mode_put.go b/swarm/storage/localstore/mode_put.go
index 1599ca8e3..488e4d8e1 100644
--- a/swarm/storage/localstore/mode_put.go
+++ b/swarm/storage/localstore/mode_put.go
@@ -17,44 +17,19 @@
 package localstore
 
 import (
+	"context"
+
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/shed"
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
-// ModePut enumerates different Putter modes.
-type ModePut int
-
-// Putter modes.
-const (
-	// ModePutRequest: when a chunk is received as a result of retrieve request and delivery
-	ModePutRequest ModePut = iota
-	// ModePutSync: when a chunk is received via syncing
-	ModePutSync
-	// ModePutUpload: when a chunk is created by local upload
-	ModePutUpload
-)
-
-// Putter provides Put method to store Chunks
-// to database.
-type Putter struct {
-	db   *DB
-	mode ModePut
-}
-
-// NewPutter returns a new Putter on database
-// with a specific Mode.
-func (db *DB) NewPutter(mode ModePut) *Putter {
-	return &Putter{
-		mode: mode,
-		db:   db,
-	}
-}
-
 // Put stores the Chunk to database and depending
 // on the Putter mode, it updates required indexes.
-func (p *Putter) Put(ch chunk.Chunk) (err error) {
-	return p.db.put(p.mode, chunkToItem(ch))
+// Put is required to implement the chunk.Store
+// interface.
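+// The returned exists value reports whether the chunk is already stored in the database.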
+func (db *DB) Put(_ context.Context, mode chunk.ModePut, ch chunk.Chunk) (exists bool, err error) {
+	return db.put(mode, chunkToItem(ch))
 }
 
 // put stores Item to database and updates other
@@ -62,7 +37,7 @@ func (p *Putter) Put(ch chunk.Chunk) (err error) {
 // of this function for the same address in parallel.
 // Item fields Address and Data must not be
 // set to their nil values.
-func (db *DB) put(mode ModePut, item shed.Item) (err error) {
+func (db *DB) put(mode chunk.ModePut, item shed.Item) (exists bool, err error) {
 	// protect parallel updates
 	db.batchMu.Lock()
 	defer db.batchMu.Unlock()
@@ -76,7 +51,7 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
 	var triggerPushFeed bool // signal push feed subscriptions to iterate
 
 	switch mode {
-	case ModePutRequest:
+	case chunk.ModePutRequest:
 		// put to indexes: retrieve, gc; it does not enter the syncpool
 
 		// check if the chunk already is in the database
@@ -84,20 +59,25 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
 		i, err := db.retrievalAccessIndex.Get(item)
 		switch err {
 		case nil:
+			exists = true
 			item.AccessTimestamp = i.AccessTimestamp
 		case leveldb.ErrNotFound:
+			exists = false
 			// no chunk accesses
 		default:
-			return err
+			return false, err
 		}
 		i, err = db.retrievalDataIndex.Get(item)
 		switch err {
 		case nil:
+			exists = true
 			item.StoreTimestamp = i.StoreTimestamp
+			item.BinID = i.BinID
 		case leveldb.ErrNotFound:
 			// no chunk accesses
+			exists = false
 		default:
-			return err
+			return false, err
 		}
 		if item.AccessTimestamp != 0 {
 			// delete current entry from the gc index
@@ -107,6 +87,12 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
 		if item.StoreTimestamp == 0 {
 			item.StoreTimestamp = now()
 		}
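+		// the chunk is not yet in the retrieval data index, so assign it the next bin id for its proximity order bin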
+		if item.BinID == 0 {
+			item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+			if err != nil {
+				return false, err
+			}
+		}
 		// update access timestamp
 		item.AccessTimestamp = now()
 		// update retrieve access index
@@ -117,36 +103,56 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
 
 		db.retrievalDataIndex.PutInBatch(batch, item)
 
-	case ModePutUpload:
+	case chunk.ModePutUpload:
 		// put to indexes: retrieve, push, pull
 
-		item.StoreTimestamp = now()
-		db.retrievalDataIndex.PutInBatch(batch, item)
-		db.pullIndex.PutInBatch(batch, item)
-		triggerPullFeed = true
-		db.pushIndex.PutInBatch(batch, item)
-		triggerPushFeed = true
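+		// index the chunk only if it is not already stored, so its bin id and timestamps remain stable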
+		exists, err = db.retrievalDataIndex.Has(item)
+		if err != nil {
+			return false, err
+		}
+		if !exists {
+			item.StoreTimestamp = now()
+			item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+			if err != nil {
+				return false, err
+			}
+			db.retrievalDataIndex.PutInBatch(batch, item)
+			db.pullIndex.PutInBatch(batch, item)
+			triggerPullFeed = true
+			db.pushIndex.PutInBatch(batch, item)
+			triggerPushFeed = true
+		}
 
-	case ModePutSync:
+	case chunk.ModePutSync:
 		// put to indexes: retrieve, pull
 
-		item.StoreTimestamp = now()
-		db.retrievalDataIndex.PutInBatch(batch, item)
-		db.pullIndex.PutInBatch(batch, item)
-		triggerPullFeed = true
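+		// as with upload, an already stored chunk is not indexed again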
+		exists, err = db.retrievalDataIndex.Has(item)
+		if err != nil {
+			return exists, err
+		}
+		if !exists {
+			item.StoreTimestamp = now()
+			item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+			if err != nil {
+				return false, err
+			}
+			db.retrievalDataIndex.PutInBatch(batch, item)
+			db.pullIndex.PutInBatch(batch, item)
+			triggerPullFeed = true
+		}
 
 	default:
-		return ErrInvalidMode
+		return false, ErrInvalidMode
 	}
 
 	err = db.incGCSizeInBatch(batch, gcSizeChange)
 	if err != nil {
-		return err
+		return false, err
 	}
 
 	err = db.shed.WriteBatch(batch)
 	if err != nil {
-		return err
+		return false, err
 	}
 	if triggerPullFeed {
 		db.triggerPullSubscriptions(db.po(item.Address))
@@ -154,5 +160,5 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
 	if triggerPushFeed {
 		db.triggerPushSubscriptions()
 	}
-	return nil
+	return exists, nil
 }
diff --git a/swarm/storage/localstore/mode_put_test.go b/swarm/storage/localstore/mode_put_test.go
index 8ecae1d2e..5376aa8b3 100644
--- a/swarm/storage/localstore/mode_put_test.go
+++ b/swarm/storage/localstore/mode_put_test.go
@@ -18,6 +18,7 @@ package localstore
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"sync"
 	"testing"
@@ -31,9 +32,7 @@ func TestModePutRequest(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	putter := db.NewPutter(ModePutRequest)
-
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
 	// keep the record when the chunk is stored
 	var storeTimestamp int64
@@ -46,12 +45,12 @@ func TestModePutRequest(t *testing.T) {
 
 		storeTimestamp = wantTimestamp
 
-		err := putter.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
+		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
 
 		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -64,12 +63,12 @@ func TestModePutRequest(t *testing.T) {
 			return wantTimestamp
 		})()
 
-		err := putter.Put(chunk)
+		_, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, storeTimestamp, wantTimestamp))
+		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, storeTimestamp, wantTimestamp))
 
 		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -87,16 +86,16 @@ func TestModePutSync(t *testing.T) {
 		return wantTimestamp
 	})()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutSync).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutSync, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
+	t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
 
-	t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+	t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
 }
 
 // TestModePutUpload validates ModePutUpload index values on the provided DB.
@@ -109,18 +108,18 @@ func TestModePutUpload(t *testing.T) {
 		return wantTimestamp
 	})()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
+	t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
 
-	t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+	t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
 
-	t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, nil))
+	t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, nil))
 }
 
 // TestModePutUpload_parallel uploads chunks in parallel
@@ -140,14 +139,13 @@ func TestModePutUpload_parallel(t *testing.T) {
 	// start uploader workers
 	for i := 0; i < workerCount; i++ {
 		go func(i int) {
-			uploader := db.NewPutter(ModePutUpload)
 			for {
 				select {
-				case chunk, ok := <-chunkChan:
+				case ch, ok := <-chunkChan:
 					if !ok {
 						return
 					}
-					err := uploader.Put(chunk)
+					_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 					select {
 					case errChan <- err:
 					case <-doneChan:
@@ -188,21 +186,85 @@ func TestModePutUpload_parallel(t *testing.T) {
 	}
 
 	// get every chunk and validate its data
-	getter := db.NewGetter(ModeGetRequest)
-
 	chunksMu.Lock()
 	defer chunksMu.Unlock()
-	for _, chunk := range chunks {
-		got, err := getter.Get(chunk.Address())
+	for _, ch := range chunks {
+		got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
 		if err != nil {
 			t.Fatal(err)
 		}
-		if !bytes.Equal(got.Data(), chunk.Data()) {
-			t.Fatalf("got chunk %s data %x, want %x", chunk.Address().Hex(), got.Data(), chunk.Data())
+		if !bytes.Equal(got.Data(), ch.Data()) {
+			t.Fatalf("got chunk %s data %x, want %x", ch.Address().Hex(), got.Data(), ch.Data())
 		}
 	}
 }
 
+// TestModePut_sameChunk puts the same chunk multiple times
+// and validates that all relevant indexes have only one item
+// in them.
+func TestModePut_sameChunk(t *testing.T) {
+	ch := generateTestRandomChunk()
+
+	for _, tc := range []struct {
+		name      string
+		mode      chunk.ModePut
+		pullIndex bool
+		pushIndex bool
+	}{
+		{
+			name:      "ModePutRequest",
+			mode:      chunk.ModePutRequest,
+			pullIndex: false,
+			pushIndex: false,
+		},
+		{
+			name:      "ModePutUpload",
+			mode:      chunk.ModePutUpload,
+			pullIndex: true,
+			pushIndex: true,
+		},
+		{
+			name:      "ModePutSync",
+			mode:      chunk.ModePutSync,
+			pullIndex: true,
+			pushIndex: false,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			db, cleanupFunc := newTestDB(t, nil)
+			defer cleanupFunc()
+
+			for i := 0; i < 10; i++ {
+				exists, err := db.Put(context.Background(), tc.mode, ch)
+				if err != nil {
+					t.Fatal(err)
+				}
+				switch exists {
+				case false:
+					if i != 0 {
+						t.Fatal("should not exist only on first Put")
+					}
+				case true:
+					if i == 0 {
+						t.Fatal("should exist on all cases other than the first one")
+					}
+				}
+
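+				// count converts the expected presence in an index into the expected number of items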
+				count := func(b bool) (c int) {
+					if b {
+						return 1
+					}
+					return 0
+				}
+
+				newItemsCountTest(db.retrievalDataIndex, 1)(t)
+				newItemsCountTest(db.pullIndex, count(tc.pullIndex))(t)
+				newItemsCountTest(db.pushIndex, count(tc.pushIndex))(t)
+			}
+		})
+	}
+}
+
 // BenchmarkPutUpload runs a series of benchmarks that upload
 // a specific number of chunks in parallel.
 //
@@ -270,7 +332,6 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
 	db, cleanupFunc := newTestDB(b, o)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
 	chunks := make([]chunk.Chunk, count)
 	for i := 0; i < count; i++ {
 		chunks[i] = generateTestRandomChunk()
@@ -286,7 +347,8 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
 			go func(i int) {
 				defer func() { <-sem }()
 
-				errs <- uploader.Put(chunks[i])
+				_, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
+				errs <- err
 			}(i)
 		}
 	}()
diff --git a/swarm/storage/localstore/mode_set.go b/swarm/storage/localstore/mode_set.go
index 83fcbea52..13e98d1ec 100644
--- a/swarm/storage/localstore/mode_set.go
+++ b/swarm/storage/localstore/mode_set.go
@@ -17,51 +17,25 @@
 package localstore
 
 import (
+	"context"
+
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
-// ModeSet enumerates different Setter modes.
-type ModeSet int
-
-// Setter modes.
-const (
-	// ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
-	ModeSetAccess ModeSet = iota
-	// ModeSetSync: when push sync receipt is received
-	ModeSetSync
-	// modeSetRemove: when GC-d
-	// unexported as no external packages should remove chunks from database
-	modeSetRemove
-)
-
-// Setter sets the state of a particular
-// Chunk in database by changing indexes.
-type Setter struct {
-	db   *DB
-	mode ModeSet
-}
-
-// NewSetter returns a new Setter on database
-// with a specific Mode.
-func (db *DB) NewSetter(mode ModeSet) *Setter {
-	return &Setter{
-		mode: mode,
-		db:   db,
-	}
-}
-
 // Set updates database indexes for a specific
 // chunk represented by the address.
-func (s *Setter) Set(addr chunk.Address) (err error) {
-	return s.db.set(s.mode, addr)
+// Set is required to implement the chunk.Store
+// interface.
+func (db *DB) Set(_ context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+	return db.set(mode, addr)
 }
 
 // set updates database indexes for a specific
 // chunk represented by the address.
 // It acquires batchMu to protect two calls
 // of this function for the same address in parallel.
-func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
+func (db *DB) set(mode chunk.ModeSet, addr chunk.Address) (err error) {
 	// protect parallel updates
 	db.batchMu.Lock()
 	defer db.batchMu.Unlock()
@@ -76,7 +50,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 	item := addressToItem(addr)
 
 	switch mode {
-	case ModeSetAccess:
+	case chunk.ModeSetAccess:
 		// add to pull, insert to gc
 
 		// need to get access timestamp here as it is not
@@ -87,9 +61,14 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 		switch err {
 		case nil:
 			item.StoreTimestamp = i.StoreTimestamp
+			item.BinID = i.BinID
 		case leveldb.ErrNotFound:
 			db.pushIndex.DeleteInBatch(batch, item)
 			item.StoreTimestamp = now()
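+			// the chunk is not in the retrieval data index, so assign it a fresh bin id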
+			item.BinID, err = db.binIDs.Inc(uint64(db.po(item.Address)))
+			if err != nil {
+				return err
+			}
 		default:
 			return err
 		}
@@ -112,7 +91,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 		db.gcIndex.PutInBatch(batch, item)
 		gcSizeChange++
 
-	case ModeSetSync:
+	case chunk.ModeSetSync:
 		// delete from push, insert to gc
 
 		// need to get access timestamp here as it is not
@@ -131,6 +110,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 			return err
 		}
 		item.StoreTimestamp = i.StoreTimestamp
+		item.BinID = i.BinID
 
 		i, err = db.retrievalAccessIndex.Get(item)
 		switch err {
@@ -149,7 +129,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 		db.gcIndex.PutInBatch(batch, item)
 		gcSizeChange++
 
-	case modeSetRemove:
+	case chunk.ModeSetRemove:
 		// delete from retrieve, pull, gc
 
 		// need to get access timestamp here as it is not
@@ -169,6 +149,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
 			return err
 		}
 		item.StoreTimestamp = i.StoreTimestamp
+		item.BinID = i.BinID
 
 		db.retrievalDataIndex.DeleteInBatch(batch, item)
 		db.retrievalAccessIndex.DeleteInBatch(batch, item)
diff --git a/swarm/storage/localstore/mode_set_test.go b/swarm/storage/localstore/mode_set_test.go
index 674aaabec..9ba62cd20 100644
--- a/swarm/storage/localstore/mode_set_test.go
+++ b/swarm/storage/localstore/mode_set_test.go
@@ -17,9 +17,11 @@
 package localstore
 
 import (
+	"context"
 	"testing"
 	"time"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
@@ -28,23 +30,23 @@ func TestModeSetAccess(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
 	wantTimestamp := time.Now().UTC().UnixNano()
 	defer setNow(func() (t int64) {
 		return wantTimestamp
 	})()
 
-	err := db.NewSetter(ModeSetAccess).Set(chunk.Address())
+	err := db.Set(context.Background(), chunk.ModeSetAccess, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+	t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
 
 	t.Run("pull index count", newItemsCountTest(db.pullIndex, 1))
 
-	t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
+	t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
 
 	t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -56,28 +58,28 @@ func TestModeSetSync(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
 	wantTimestamp := time.Now().UTC().UnixNano()
 	defer setNow(func() (t int64) {
 		return wantTimestamp
 	})()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	err = db.NewSetter(ModeSetSync).Set(chunk.Address())
+	err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
+	t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
 
-	t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, leveldb.ErrNotFound))
+	t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, leveldb.ErrNotFound))
 
-	t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
+	t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
 
 	t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
 
@@ -89,40 +91,39 @@ func TestModeSetRemove(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	chunk := generateTestRandomChunk()
+	ch := generateTestRandomChunk()
 
-	err := db.NewPutter(ModePutUpload).Put(chunk)
+	_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	err = db.NewSetter(modeSetRemove).Set(chunk.Address())
+	err = db.Set(context.Background(), chunk.ModeSetRemove, ch.Address())
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	t.Run("retrieve indexes", func(t *testing.T) {
 		wantErr := leveldb.ErrNotFound
-		_, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
+		_, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
 		if err != wantErr {
 			t.Errorf("got error %v, want %v", err, wantErr)
 		}
 		t.Run("retrieve data index count", newItemsCountTest(db.retrievalDataIndex, 0))
 
 		// access index should not be set
-		_, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
+		_, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
 		if err != wantErr {
 			t.Errorf("got error %v, want %v", err, wantErr)
 		}
 		t.Run("retrieve access index count", newItemsCountTest(db.retrievalAccessIndex, 0))
 	})
 
-	t.Run("pull index", newPullIndexTest(db, chunk, 0, leveldb.ErrNotFound))
+	t.Run("pull index", newPullIndexTest(db, ch, 0, leveldb.ErrNotFound))
 
 	t.Run("pull index count", newItemsCountTest(db.pullIndex, 0))
 
 	t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
 
 	t.Run("gc size", newIndexGCSizeTest(db))
-
 }
diff --git a/swarm/storage/localstore/retrieval_index_test.go b/swarm/storage/localstore/retrieval_index_test.go
index b08790124..4ca2e32e6 100644
--- a/swarm/storage/localstore/retrieval_index_test.go
+++ b/swarm/storage/localstore/retrieval_index_test.go
@@ -17,6 +17,7 @@
 package localstore
 
 import (
+	"context"
 	"strconv"
 	"testing"
 
@@ -61,17 +62,14 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
 	b.StopTimer()
 	db, cleanupFunc := newTestDB(b, o)
 	defer cleanupFunc()
-	uploader := db.NewPutter(ModePutUpload)
-	syncer := db.NewSetter(ModeSetSync)
-	requester := db.NewGetter(ModeGetRequest)
 	addrs := make([]chunk.Address, count)
 	for i := 0; i < count; i++ {
-		chunk := generateTestRandomChunk()
-		err := uploader.Put(chunk)
+		ch := generateTestRandomChunk()
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			b.Fatal(err)
 		}
-		addrs[i] = chunk.Address()
+		addrs[i] = ch.Address()
 	}
 	// set update gc test hook to signal when
 	// update gc goroutine is done by sending to
@@ -85,12 +83,12 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
 	b.StartTimer()
 
 	for i := 0; i < count; i++ {
-		err := syncer.Set(addrs[i])
+		err := db.Set(context.Background(), chunk.ModeSetSync, addrs[i])
 		if err != nil {
 			b.Fatal(err)
 		}
 
-		_, err = requester.Get(addrs[i])
+		_, err = db.Get(context.Background(), chunk.ModeGetRequest, addrs[i])
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -133,7 +131,6 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
 	b.StopTimer()
 	db, cleanupFunc := newTestDB(b, o)
 	defer cleanupFunc()
-	uploader := db.NewPutter(ModePutUpload)
 	chunks := make([]chunk.Chunk, count)
 	for i := 0; i < count; i++ {
 		chunk := generateTestRandomChunk()
@@ -142,7 +139,7 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
 	b.StartTimer()
 
 	for i := 0; i < count; i++ {
-		err := uploader.Put(chunks[i])
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
 		if err != nil {
 			b.Fatal(err)
 		}
diff --git a/swarm/storage/localstore/schema.go b/swarm/storage/localstore/schema.go
new file mode 100644
index 000000000..538c75d1f
--- /dev/null
+++ b/swarm/storage/localstore/schema.go
@@ -0,0 +1,52 @@
+package localstore
+
+import (
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// The DB schema we want to use. The actual/current DB schema might differ
+// until migrations are run.
+const CurrentDbSchema = DbSchemaSanctuary
+
+// There was a time when we had no schema at all.
+const DbSchemaNone = ""
+
+// "purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
+const DbSchemaPurity = "purity"
+
+// "halloween" is here because we had a screw in the garbage collector index.
+// Because of that we had to rebuild the GC index to get rid of erroneous
+// entries and that takes a long time. This schema is used for bookkeeping,
+// so rebuild index will run just once.
+const DbSchemaHalloween = "halloween"
+
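+// "sanctuary" is the schema introduced together with the new localstore implementation.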
+const DbSchemaSanctuary = "sanctuary"
+
+// IsLegacyDatabase returns true if a legacy database is present in the datadir.
+func IsLegacyDatabase(datadir string) bool {
+
+	var (
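+		// key under which the legacy (ldbstore) database kept its schema name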
+		legacyDbSchemaKey = []byte{8}
+	)
+
+	db, err := leveldb.OpenFile(datadir, &opt.Options{OpenFilesCacheCapacity: 128})
+	if err != nil {
+		log.Error("got an error while trying to open leveldb path", "path", datadir, "err", err)
+		return false
+	}
+	defer db.Close()
+
+	data, err := db.Get(legacyDbSchemaKey, nil)
+	if err != nil {
+		if err == leveldb.ErrNotFound {
+			// nothing is stored under the legacy db schema key, so this is not a legacy database
+			return false
+		}
+
+		log.Error("got an unexpected error fetching legacy name from the database", "err", err)
+	}
+	log.Trace("checking if database scheme is legacy", "schema name", string(data))
+	return string(data) == DbSchemaHalloween || string(data) == DbSchemaPurity
+}
diff --git a/swarm/storage/localstore/subscription_pull.go b/swarm/storage/localstore/subscription_pull.go
index 0b96102e3..fd81b045b 100644
--- a/swarm/storage/localstore/subscription_pull.go
+++ b/swarm/storage/localstore/subscription_pull.go
@@ -17,10 +17,8 @@
 package localstore
 
 import (
-	"bytes"
 	"context"
 	"errors"
-	"fmt"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/log"
@@ -31,14 +29,14 @@ import (
 
 // SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
 // Pull syncing index can be only subscribed to a particular proximity order bin. If since
-// is not nil, the iteration will start from the first item stored after that timestamp. If until is not nil,
-// only chunks stored up to this timestamp will be send to the channel, and the returned channel will be
-// closed. The since-until interval is open on the left and closed on the right (since,until]. Returned stop
+// is not 0, the iteration will start from the item with that bin id. If until is not 0,
+// only chunks stored up to this id will be sent to the channel, and the returned channel will be
+// closed. The since-until interval is closed on both sides [since,until]. Returned stop
 // function will terminate current and further iterations without errors, and also close the returned channel.
 // Make sure that you check the second returned parameter from the channel to stop iteration when its value
 // is false.
-func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkDescriptor) (c <-chan ChunkDescriptor, stop func()) {
-	chunkDescriptors := make(chan ChunkDescriptor)
+func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+	chunkDescriptors := make(chan chunk.Descriptor)
 	trigger := make(chan struct{}, 1)
 
 	db.pullTriggersMu.Lock()
@@ -59,18 +57,19 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
 	var errStopSubscription = errors.New("stop subscription")
 
 	go func() {
-		// close the returned ChunkDescriptor channel at the end to
+		// close the returned chunk.Descriptor channel at the end to
 		// signal that the subscription is done
 		defer close(chunkDescriptors)
 		// sinceItem is the Item from which the next iteration
 		// should start. The first iteration starts from the first Item.
 		var sinceItem *shed.Item
-		if since != nil {
+		if since > 0 {
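+			// the address is needed only to select the right bin in the index;
+			// the position within the bin is determined by the bin id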
 			sinceItem = &shed.Item{
-				Address:        since.Address,
-				StoreTimestamp: since.StoreTimestamp,
+				Address: db.addressInBin(bin),
+				BinID:   since,
 			}
 		}
+		first := true // first iteration flag for SkipStartFromItem
 		for {
 			select {
 			case <-trigger:
@@ -80,15 +79,13 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
 				// - context is done
 				err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
 					select {
-					case chunkDescriptors <- ChunkDescriptor{
-						Address:        item.Address,
-						StoreTimestamp: item.StoreTimestamp,
+					case chunkDescriptors <- chunk.Descriptor{
+						Address: item.Address,
+						BinID:   item.BinID,
 					}:
 						// until chunk descriptor is sent
 						// break the iteration
-						if until != nil &&
-							(item.StoreTimestamp >= until.StoreTimestamp ||
-								bytes.Equal(item.Address, until.Address)) {
+						if until > 0 && item.BinID >= until {
 							return true, errStopSubscription
 						}
 						// set next iteration start item
@@ -109,8 +106,9 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
 				}, &shed.IterateOptions{
 					StartFrom: sinceItem,
 					// sinceItem was sent as the last Address in the previous
-					// iterator call, skip it in this one
-					SkipStartFromItem: true,
+					// iterator call, skip it in this one, except on the first
+					// iteration, when the item with the provided since bin id
+					// must also be sent to the channel
+					SkipStartFromItem: !first,
 					Prefix:            []byte{bin},
 				})
 				if err != nil {
@@ -122,6 +120,7 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
 					log.Error("localstore pull subscription iteration", "bin", bin, "since", since, "until", until, "err", err)
 					return
 				}
+				first = false
 			case <-stopChan:
 				// terminate the subscription
 				// on stop
@@ -159,35 +158,18 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
 	return chunkDescriptors, stop
 }
 
-// LastPullSubscriptionChunk returns ChunkDescriptor of the latest Chunk
+// LastPullSubscriptionBinID returns the bin id of the latest Chunk
 // in pull syncing index for a provided bin. If there are no chunks in
-// that bin, chunk.ErrChunkNotFound is returned.
-func (db *DB) LastPullSubscriptionChunk(bin uint8) (c *ChunkDescriptor, err error) {
+// that bin, 0 is returned.
+func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
 	item, err := db.pullIndex.Last([]byte{bin})
 	if err != nil {
 		if err == leveldb.ErrNotFound {
-			return nil, chunk.ErrChunkNotFound
+			return 0, nil
 		}
-		return nil, err
+		return 0, err
 	}
-	return &ChunkDescriptor{
-		Address:        item.Address,
-		StoreTimestamp: item.StoreTimestamp,
-	}, nil
-}
-
-// ChunkDescriptor holds information required for Pull syncing. This struct
-// is provided by subscribing to pull index.
-type ChunkDescriptor struct {
-	Address        chunk.Address
-	StoreTimestamp int64
-}
-
-func (c *ChunkDescriptor) String() string {
-	if c == nil {
-		return "none"
-	}
-	return fmt.Sprintf("%s stored at %v", c.Address.Hex(), c.StoreTimestamp)
+	return item.BinID, nil
 }
 
 // triggerPullSubscriptions is used internally for starting iterations
@@ -209,3 +191,12 @@ func (db *DB) triggerPullSubscriptions(bin uint8) {
 		}
 	}
 }
+
+// addressInBin returns an address that is in a specific
+// proximity order bin relative to the database base key.
+func (db *DB) addressInBin(bin uint8) (addr chunk.Address) {
+	addr = append([]byte(nil), db.baseKey...)
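+	// flip the bit at position bin (counting from the most significant bit)
+	// so that the resulting address shares exactly bin leading bits with the base key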
+	b := bin / 8
+	addr[b] = addr[b] ^ (1 << (7 - bin%8))
+	return addr
+}
diff --git a/swarm/storage/localstore/subscription_pull_test.go b/swarm/storage/localstore/subscription_pull_test.go
index d5ddae02b..bf364ed44 100644
--- a/swarm/storage/localstore/subscription_pull_test.go
+++ b/swarm/storage/localstore/subscription_pull_test.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/shed"
 )
 
 // TestDB_SubscribePull uploads some chunks before and after
@@ -35,15 +36,13 @@ func TestDB_SubscribePull(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 	var addrsMu sync.Mutex
 	var wantedChunksCount int
 
 	// prepopulate database with some chunks
 	// before the subscription
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
 
 	// set a timeout on subscription
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -54,22 +53,22 @@ func TestDB_SubscribePull(t *testing.T) {
 	errChan := make(chan error)
 
 	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
-		ch, stop := db.SubscribePull(ctx, bin, nil, nil)
+		ch, stop := db.SubscribePull(ctx, bin, 0, 0)
 		defer stop()
 
 		// receive and validate addresses from the subscription
-		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+		go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
 	}
 
 	// upload some chunks just after subscribe
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
 
 	time.Sleep(200 * time.Millisecond)
 
 	// upload some chunks after some short time
 	// to ensure that subscription will include them
 	// in a dynamic environment
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
 
 	checkErrChan(ctx, t, errChan, wantedChunksCount)
 }
@@ -82,15 +81,13 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 	var addrsMu sync.Mutex
 	var wantedChunksCount int
 
 	// prepopulate database with some chunks
 	// before the subscription
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
 
 	// set a timeout on subscription
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -106,23 +103,23 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
 	// that all of them will write every address error to errChan
 	for j := 0; j < subsCount; j++ {
 		for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
-			ch, stop := db.SubscribePull(ctx, bin, nil, nil)
+			ch, stop := db.SubscribePull(ctx, bin, 0, 0)
 			defer stop()
 
 			// receive and validate addresses from the subscription
-			go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+			go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
 		}
 	}
 
 	// upload some chunks just after subscribe
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
 
 	time.Sleep(200 * time.Millisecond)
 
 	// upload some chunks after some short time
 	// to ensure that subscription will include them
 	// in a dynamic environment
-	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
+	uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
 
 	checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
 }
@@ -135,61 +132,52 @@ func TestDB_SubscribePull_since(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 	var addrsMu sync.Mutex
 	var wantedChunksCount int
 
-	lastTimestamp := time.Now().UTC().UnixNano()
-	var lastTimestampMu sync.RWMutex
-	defer setNow(func() (t int64) {
-		lastTimestampMu.Lock()
-		defer lastTimestampMu.Unlock()
-		lastTimestamp++
-		return lastTimestamp
-	})()
+	binIDCounter := make(map[uint8]uint64)
+	var binIDCounterMu sync.RWMutex
 
-	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+	uploadRandomChunks := func(count int, wanted bool) (first map[uint8]uint64) {
 		addrsMu.Lock()
 		defer addrsMu.Unlock()
 
-		last = make(map[uint8]ChunkDescriptor)
+		first = make(map[uint8]uint64)
 		for i := 0; i < count; i++ {
 			ch := generateTestRandomChunk()
 
-			err := uploader.Put(ch)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
 
 			bin := db.po(ch.Address())
 
-			if _, ok := addrs[bin]; !ok {
-				addrs[bin] = make([]chunk.Address, 0)
-			}
+			binIDCounterMu.Lock()
+			binIDCounter[bin]++
+			binIDCounterMu.Unlock()
+
 			if wanted {
+				if _, ok := addrs[bin]; !ok {
+					addrs[bin] = make([]chunk.Address, 0)
+				}
 				addrs[bin] = append(addrs[bin], ch.Address())
 				wantedChunksCount++
-			}
 
-			lastTimestampMu.RLock()
-			storeTimestamp := lastTimestamp
-			lastTimestampMu.RUnlock()
-
-			last[bin] = ChunkDescriptor{
-				Address:        ch.Address(),
-				StoreTimestamp: storeTimestamp,
+				if _, ok := first[bin]; !ok {
+					first[bin] = binIDCounter[bin]
+				}
 			}
 		}
-		return last
+		return first
 	}
 
 	// prepopulate database with some chunks
 	// before the subscription
-	last := uploadRandomChunks(30, false)
+	uploadRandomChunks(30, false)
 
-	uploadRandomChunks(25, true)
+	first := uploadRandomChunks(25, true)
 
 	// set a timeout on subscription
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -200,21 +188,18 @@ func TestDB_SubscribePull_since(t *testing.T) {
 	errChan := make(chan error)
 
 	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
-		var since *ChunkDescriptor
-		if c, ok := last[bin]; ok {
-			since = &c
+		since, ok := first[bin]
+		if !ok {
+			continue
 		}
-		ch, stop := db.SubscribePull(ctx, bin, since, nil)
+		ch, stop := db.SubscribePull(ctx, bin, since, 0)
 		defer stop()
 
 		// receive and validate addresses from the subscription
-		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+		go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
 
 	}
 
-	// upload some chunks just after subscribe
-	uploadRandomChunks(15, true)
-
 	checkErrChan(ctx, t, errChan, wantedChunksCount)
 }
 
@@ -226,30 +211,22 @@ func TestDB_SubscribePull_until(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 	var addrsMu sync.Mutex
 	var wantedChunksCount int
 
-	lastTimestamp := time.Now().UTC().UnixNano()
-	var lastTimestampMu sync.RWMutex
-	defer setNow(func() (t int64) {
-		lastTimestampMu.Lock()
-		defer lastTimestampMu.Unlock()
-		lastTimestamp++
-		return lastTimestamp
-	})()
+	binIDCounter := make(map[uint8]uint64)
+	var binIDCounterMu sync.RWMutex
 
-	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
 		addrsMu.Lock()
 		defer addrsMu.Unlock()
 
-		last = make(map[uint8]ChunkDescriptor)
+		last = make(map[uint8]uint64)
 		for i := 0; i < count; i++ {
 			ch := generateTestRandomChunk()
 
-			err := uploader.Put(ch)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -264,14 +241,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
 				wantedChunksCount++
 			}
 
-			lastTimestampMu.RLock()
-			storeTimestamp := lastTimestamp
-			lastTimestampMu.RUnlock()
+			binIDCounterMu.Lock()
+			binIDCounter[bin]++
+			binIDCounterMu.Unlock()
 
-			last[bin] = ChunkDescriptor{
-				Address:        ch.Address(),
-				StoreTimestamp: storeTimestamp,
-			}
+			last[bin] = binIDCounter[bin]
 		}
 		return last
 	}
@@ -295,11 +269,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
 		if !ok {
 			continue
 		}
-		ch, stop := db.SubscribePull(ctx, bin, nil, &until)
+		ch, stop := db.SubscribePull(ctx, bin, 0, until)
 		defer stop()
 
 		// receive and validate addresses from the subscription
-		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+		go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
 	}
 
 	// upload some chunks just after subscribe
@@ -316,30 +290,22 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 	var addrsMu sync.Mutex
 	var wantedChunksCount int
 
-	lastTimestamp := time.Now().UTC().UnixNano()
-	var lastTimestampMu sync.RWMutex
-	defer setNow(func() (t int64) {
-		lastTimestampMu.Lock()
-		defer lastTimestampMu.Unlock()
-		lastTimestamp++
-		return lastTimestamp
-	})()
+	binIDCounter := make(map[uint8]uint64)
+	var binIDCounterMu sync.RWMutex
 
-	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
 		addrsMu.Lock()
 		defer addrsMu.Unlock()
 
-		last = make(map[uint8]ChunkDescriptor)
+		last = make(map[uint8]uint64)
 		for i := 0; i < count; i++ {
 			ch := generateTestRandomChunk()
 
-			err := uploader.Put(ch)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -354,14 +320,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
 				wantedChunksCount++
 			}
 
-			lastTimestampMu.RLock()
-			storeTimestamp := lastTimestamp
-			lastTimestampMu.RUnlock()
+			binIDCounterMu.Lock()
+			binIDCounter[bin]++
+			binIDCounterMu.Unlock()
 
-			last[bin] = ChunkDescriptor{
-				Address:        ch.Address(),
-				StoreTimestamp: storeTimestamp,
-			}
+			last[bin] = binIDCounter[bin]
 		}
 		return last
 	}
@@ -387,9 +350,10 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
 	errChan := make(chan error)
 
 	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
-		var since *ChunkDescriptor
-		if c, ok := upload1[bin]; ok {
-			since = &c
+		since, ok := upload1[bin]
+		if ok {
+			// start from the next uploaded chunk
+			since++
 		}
 		until, ok := upload2[bin]
 		if !ok {
@@ -397,11 +361,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
 			// skip this bin from testing
 			continue
 		}
-		ch, stop := db.SubscribePull(ctx, bin, since, &until)
+		ch, stop := db.SubscribePull(ctx, bin, since, until)
 		defer stop()
 
 		// receive and validate addresses from the subscription
-		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+		go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
 	}
 
 	// upload some chunks just after subscribe
@@ -412,14 +376,14 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
 
 // uploadRandomChunksBin uploads random chunks to database and adds them to
 // the map of addresses per bin.
-func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
+func uploadRandomChunksBin(t *testing.T, db *DB, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
 	addrsMu.Lock()
 	defer addrsMu.Unlock()
 
 	for i := 0; i < count; i++ {
 		ch := generateTestRandomChunk()
 
-		err := uploader.Put(ch)
+		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -434,10 +398,10 @@ func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uin
 	}
 }
 
-// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors from a channel and
-// sends error to errChan, even if it is nil, to count the number of ChunkDescriptors
+// readPullSubscriptionBin is a helper function that reads all chunk.Descriptors from a channel and
+// sends error to errChan, even if it is nil, to count the number of chunk.Descriptors
 // returned by the channel.
-func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
+func readPullSubscriptionBin(ctx context.Context, db *DB, bin uint8, ch <-chan chunk.Descriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
 	var i int // address index
 	for {
 		select {
@@ -450,9 +414,20 @@ func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDesc
 			if i+1 > len(addrs[bin]) {
 				err = fmt.Errorf("got more chunk addresses %v, then expected %v, for bin %v", i+1, len(addrs[bin]), bin)
 			} else {
-				want := addrs[bin][i]
-				if !bytes.Equal(got.Address, want) {
-					err = fmt.Errorf("got chunk address %v in bin %v %s, want %s", i, bin, got.Address.Hex(), want)
+				addr := addrs[bin][i]
+				if !bytes.Equal(got.Address, addr) {
+					err = fmt.Errorf("got chunk bin id %v in bin %v %v, want %v", i, bin, got.Address.Hex(), addr.Hex())
+				} else {
+					want, err := db.retrievalDataIndex.Get(shed.Item{
+						Address: addr,
+					})
+					if err != nil {
+						err = fmt.Errorf("got chunk (bin id %v in bin %v) from retrieval index %s: %v", i, bin, addrs[bin][i].Hex(), err)
+					} else {
+						if got.BinID != want.BinID {
+							err = fmt.Errorf("got chunk bin id %v in bin %v %v, want %v", i, bin, got, want)
+						}
+					}
 				}
 			}
 			addrsMu.Unlock()
@@ -486,27 +461,19 @@ func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedC
 	}
 }
 
-// TestDB_LastPullSubscriptionChunk validates that LastPullSubscriptionChunk
+// TestDB_LastPullSubscriptionBinID validates that LastPullSubscriptionBinID
 // is returning the last chunk bin id for proximity order bins by
 // doing a few rounds of chunk uploads.
-func TestDB_LastPullSubscriptionChunk(t *testing.T) {
+func TestDB_LastPullSubscriptionBinID(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make(map[uint8][]chunk.Address)
 
-	lastTimestamp := time.Now().UTC().UnixNano()
-	var lastTimestampMu sync.RWMutex
-	defer setNow(func() (t int64) {
-		lastTimestampMu.Lock()
-		defer lastTimestampMu.Unlock()
-		lastTimestamp++
-		return lastTimestamp
-	})()
+	binIDCounter := make(map[uint8]uint64)
+	var binIDCounterMu sync.RWMutex
 
-	last := make(map[uint8]ChunkDescriptor)
+	last := make(map[uint8]uint64)
 
 	// do a few rounds of uploads and check if
 	// last pull subscription bin id is correct
@@ -516,7 +483,7 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
 		for i := 0; i < count; i++ {
 			ch := generateTestRandomChunk()
 
-			err := uploader.Put(ch)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -528,32 +495,42 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
 			}
 			addrs[bin] = append(addrs[bin], ch.Address())
 
-			lastTimestampMu.RLock()
-			storeTimestamp := lastTimestamp
-			lastTimestampMu.RUnlock()
+			binIDCounterMu.Lock()
+			binIDCounter[bin]++
+			binIDCounterMu.Unlock()
 
-			last[bin] = ChunkDescriptor{
-				Address:        ch.Address(),
-				StoreTimestamp: storeTimestamp,
-			}
+			last[bin] = binIDCounter[bin]
 		}
 
 		// check
 		for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
 			want, ok := last[bin]
-			got, err := db.LastPullSubscriptionChunk(bin)
+			got, err := db.LastPullSubscriptionBinID(bin)
 			if ok {
 				if err != nil {
 					t.Errorf("got unexpected error value %v", err)
 				}
-				if !bytes.Equal(got.Address, want.Address) {
-					t.Errorf("got last address %s, want %s", got.Address.Hex(), want.Address.Hex())
-				}
-			} else {
-				if err != chunk.ErrChunkNotFound {
-					t.Errorf("got unexpected error value %v, want %v", err, chunk.ErrChunkNotFound)
-				}
 			}
+			if got != want {
+				t.Errorf("got last bin id %v, want %v", got, want)
+			}
+		}
+	}
+}
+
+// TestAddressInBin validates that function addressInBin
+// returns a valid address for every proximity order bin.
+func TestAddressInBin(t *testing.T) {
+	db, cleanupFunc := newTestDB(t, nil)
+	defer cleanupFunc()
+
+	for po := uint8(0); po < chunk.MaxPO; po++ {
+		addr := db.addressInBin(po)
+
+		got := db.po(addr)
+
+		if got != uint8(po) {
+			t.Errorf("got po %v, want %v", got, po)
 		}
 	}
 }
diff --git a/swarm/storage/localstore/subscription_push_test.go b/swarm/storage/localstore/subscription_push_test.go
index 30fb98eb2..6124a534b 100644
--- a/swarm/storage/localstore/subscription_push_test.go
+++ b/swarm/storage/localstore/subscription_push_test.go
@@ -34,8 +34,6 @@ func TestDB_SubscribePush(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	chunks := make([]chunk.Chunk, 0)
 	var chunksMu sync.Mutex
 
@@ -44,14 +42,14 @@ func TestDB_SubscribePush(t *testing.T) {
 		defer chunksMu.Unlock()
 
 		for i := 0; i < count; i++ {
-			chunk := generateTestRandomChunk()
+			ch := generateTestRandomChunk()
 
-			err := uploader.Put(chunk)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
 
-			chunks = append(chunks, chunk)
+			chunks = append(chunks, ch)
 		}
 	}
 
@@ -122,8 +120,6 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
 	db, cleanupFunc := newTestDB(t, nil)
 	defer cleanupFunc()
 
-	uploader := db.NewPutter(ModePutUpload)
-
 	addrs := make([]chunk.Address, 0)
 	var addrsMu sync.Mutex
 
@@ -132,14 +128,14 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
 		defer addrsMu.Unlock()
 
 		for i := 0; i < count; i++ {
-			chunk := generateTestRandomChunk()
+			ch := generateTestRandomChunk()
 
-			err := uploader.Put(chunk)
+			_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
 			if err != nil {
 				t.Fatal(err)
 			}
 
-			addrs = append(addrs, chunk.Address())
+			addrs = append(addrs, ch.Address())
 		}
 	}
 
diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go
deleted file mode 100644
index fcadcefa0..000000000
--- a/swarm/storage/localstore_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
-	"context"
-	"io/ioutil"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/ethereum/go-ethereum/swarm/chunk"
-)
-
-var (
-	hashfunc = MakeHashFunc(DefaultHash)
-)
-
-// tests that the content address validator correctly checks the data
-// tests that feed update chunks are passed through content address validator
-// the test checking the resouce update validator internal correctness is found in storage/feeds/handler_test.go
-func TestValidator(t *testing.T) {
-	// set up localstore
-	datadir, err := ioutil.TempDir("", "storage-testvalidator")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(datadir)
-
-	params := NewDefaultLocalStoreParams()
-	params.Init(datadir)
-	store, err := NewLocalStore(params, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// check puts with no validators, both succeed
-	chunks := GenerateRandomChunks(259, 2)
-	goodChunk := chunks[0]
-	badChunk := chunks[1]
-	copy(badChunk.Data(), goodChunk.Data())
-
-	errs := putChunks(store, goodChunk, badChunk)
-	if errs[0] != nil {
-		t.Fatalf("expected no error on good content address chunk in spite of no validation, but got: %s", err)
-	}
-	if errs[1] != nil {
-		t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
-	}
-
-	// add content address validator and check puts
-	// bad should fail, good should pass
-	store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc))
-	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
-	goodChunk = chunks[0]
-	badChunk = chunks[1]
-	copy(badChunk.Data(), goodChunk.Data())
-
-	errs = putChunks(store, goodChunk, badChunk)
-	if errs[0] != nil {
-		t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
-	}
-	if errs[1] == nil {
-		t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
-	}
-
-	// append a validator that always denies
-	// bad should fail, good should pass,
-	var negV boolTestValidator
-	store.Validators = append(store.Validators, negV)
-
-	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
-	goodChunk = chunks[0]
-	badChunk = chunks[1]
-	copy(badChunk.Data(), goodChunk.Data())
-
-	errs = putChunks(store, goodChunk, badChunk)
-	if errs[0] != nil {
-		t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
-	}
-	if errs[1] == nil {
-		t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
-	}
-
-	// append a validator that always approves
-	// all shall pass
-	var posV boolTestValidator = true
-	store.Validators = append(store.Validators, posV)
-
-	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
-	goodChunk = chunks[0]
-	badChunk = chunks[1]
-	copy(badChunk.Data(), goodChunk.Data())
-
-	errs = putChunks(store, goodChunk, badChunk)
-	if errs[0] != nil {
-		t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
-	}
-	if errs[1] != nil {
-		t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
-	}
-
-}
-
-type boolTestValidator bool
-
-func (self boolTestValidator) Validate(chunk Chunk) bool {
-	return bool(self)
-}
-
-// putChunks adds chunks  to localstore
-// It waits for receive on the stored channel
-// It logs but does not fail on delivery error
-func putChunks(store *LocalStore, chunks ...Chunk) []error {
-	i := 0
-	f := func(n int64) Chunk {
-		chunk := chunks[i]
-		i++
-		return chunk
-	}
-	_, errs := put(store, len(chunks), f)
-	return errs
-}
-
-func put(store *LocalStore, n int, f func(i int64) Chunk) (hs []Address, errs []error) {
-	for i := int64(0); i < int64(n); i++ {
-		chunk := f(chunk.DefaultSize)
-		err := store.Put(context.TODO(), chunk)
-		errs = append(errs, err)
-		hs = append(hs, chunk.Address())
-	}
-	return hs, errs
-}
-
-// TestGetFrequentlyAccessedChunkWontGetGarbageCollected tests that the most
-// frequently accessed chunk is not garbage collected from LDBStore, i.e.,
-// from disk when we are at the capacity and garbage collector runs. For that
-// we start putting random chunks into the DB while continuously accessing the
-// chunk we care about then check if we can still retrieve it from disk.
-func TestGetFrequentlyAccessedChunkWontGetGarbageCollected(t *testing.T) {
-	ldbCap := defaultGCRatio
-	store, cleanup := setupLocalStore(t, ldbCap)
-	defer cleanup()
-
-	var chunks []Chunk
-	for i := 0; i < ldbCap; i++ {
-		chunks = append(chunks, GenerateRandomChunk(chunk.DefaultSize))
-	}
-
-	mostAccessed := chunks[0].Address()
-	for _, chunk := range chunks {
-		if err := store.Put(context.Background(), chunk); err != nil {
-			t.Fatal(err)
-		}
-
-		if _, err := store.Get(context.Background(), mostAccessed); err != nil {
-			t.Fatal(err)
-		}
-		// Add time for MarkAccessed() to be able to finish in a separate Goroutine
-		time.Sleep(1 * time.Millisecond)
-	}
-
-	store.DbStore.collectGarbage()
-	if _, err := store.DbStore.Get(context.Background(), mostAccessed); err != nil {
-		t.Logf("most frequntly accessed chunk not found on disk (key: %v)", mostAccessed)
-		t.Fatal(err)
-	}
-
-}
-
-func setupLocalStore(t *testing.T, ldbCap int) (ls *LocalStore, cleanup func()) {
-	t.Helper()
-
-	var err error
-	datadir, err := ioutil.TempDir("", "storage")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	params := &LocalStoreParams{
-		StoreParams: NewStoreParams(uint64(ldbCap), uint(ldbCap), nil, nil),
-	}
-	params.Init(datadir)
-
-	store, err := NewLocalStore(params, nil)
-	if err != nil {
-		_ = os.RemoveAll(datadir)
-		t.Fatal(err)
-	}
-
-	cleanup = func() {
-		store.Close()
-		_ = os.RemoveAll(datadir)
-	}
-
-	return store, cleanup
-}
-
-func TestHas(t *testing.T) {
-	ldbCap := defaultGCRatio
-	store, cleanup := setupLocalStore(t, ldbCap)
-	defer cleanup()
-
-	nonStoredAddr := GenerateRandomChunk(128).Address()
-
-	has := store.Has(context.Background(), nonStoredAddr)
-	if has {
-		t.Fatal("Expected Has() to return false, but returned true!")
-	}
-
-	storeChunks := GenerateRandomChunks(128, 3)
-	for _, ch := range storeChunks {
-		err := store.Put(context.Background(), ch)
-		if err != nil {
-			t.Fatalf("Expected store to store chunk, but it failed: %v", err)
-		}
-
-		has := store.Has(context.Background(), ch.Address())
-		if !has {
-			t.Fatal("Expected Has() to return true, but returned false!")
-		}
-	}
-
-	// let's be paranoid and test again that the non-existent chunk returns false
-	has = store.Has(context.Background(), nonStoredAddr)
-	if has {
-		t.Fatal("Expected Has() to return false, but returned true!")
-	}
-
-}
diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go
deleted file mode 100644
index 611ac3bc5..000000000
--- a/swarm/storage/memstore.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// memory storage layer for the package blockhash
-
-package storage
-
-import (
-	"context"
-
-	lru "github.com/hashicorp/golang-lru"
-)
-
-type MemStore struct {
-	cache    *lru.Cache
-	disabled bool
-}
-
-//NewMemStore is instantiating a MemStore cache keeping all frequently requested
-//chunks in the `cache` LRU cache.
-func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
-	if params.CacheCapacity == 0 {
-		return &MemStore{
-			disabled: true,
-		}
-	}
-
-	c, err := lru.New(int(params.CacheCapacity))
-	if err != nil {
-		panic(err)
-	}
-
-	return &MemStore{
-		cache: c,
-	}
-}
-
-// Has needed to implement SyncChunkStore
-func (m *MemStore) Has(_ context.Context, addr Address) bool {
-	return m.cache.Contains(addr)
-}
-
-func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) {
-	if m.disabled {
-		return nil, ErrChunkNotFound
-	}
-
-	c, ok := m.cache.Get(string(addr))
-	if !ok {
-		return nil, ErrChunkNotFound
-	}
-	return c.(Chunk), nil
-}
-
-func (m *MemStore) Put(_ context.Context, c Chunk) error {
-	if m.disabled {
-		return nil
-	}
-
-	m.cache.Add(string(c.Address()), c)
-	return nil
-}
-
-func (m *MemStore) setCapacity(n int) {
-	if n <= 0 {
-		m.disabled = true
-	} else {
-		c, err := lru.New(n)
-		if err != nil {
-			panic(err)
-		}
-
-		*m = MemStore{
-			cache: c,
-		}
-	}
-}
-
-func (s *MemStore) Close() {}
diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go
deleted file mode 100644
index 8aaf486a7..000000000
--- a/swarm/storage/memstore_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
-	"context"
-	"testing"
-
-	"github.com/ethereum/go-ethereum/swarm/log"
-)
-
-func newTestMemStore() *MemStore {
-	storeparams := NewDefaultStoreParams()
-	return NewMemStore(storeparams, nil)
-}
-
-func testMemStoreRandom(n int, t *testing.T) {
-	m := newTestMemStore()
-	defer m.Close()
-	testStoreRandom(m, n, t)
-}
-
-func testMemStoreCorrect(n int, t *testing.T) {
-	m := newTestMemStore()
-	defer m.Close()
-	testStoreCorrect(m, n, t)
-}
-
-func TestMemStoreRandom_1(t *testing.T) {
-	testMemStoreRandom(1, t)
-}
-
-func TestMemStoreCorrect_1(t *testing.T) {
-	testMemStoreCorrect(1, t)
-}
-
-func TestMemStoreRandom_1k(t *testing.T) {
-	testMemStoreRandom(1000, t)
-}
-
-func TestMemStoreCorrect_1k(t *testing.T) {
-	testMemStoreCorrect(100, t)
-}
-
-func TestMemStoreNotFound(t *testing.T) {
-	m := newTestMemStore()
-	defer m.Close()
-
-	_, err := m.Get(context.TODO(), ZeroAddr)
-	if err != ErrChunkNotFound {
-		t.Errorf("Expected ErrChunkNotFound, got %v", err)
-	}
-}
-
-func benchmarkMemStorePut(n int, b *testing.B) {
-	m := newTestMemStore()
-	defer m.Close()
-	benchmarkStorePut(m, n, b)
-}
-
-func benchmarkMemStoreGet(n int, b *testing.B) {
-	m := newTestMemStore()
-	defer m.Close()
-	benchmarkStoreGet(m, n, b)
-}
-
-func BenchmarkMemStorePut_500(b *testing.B) {
-	benchmarkMemStorePut(500, b)
-}
-
-func BenchmarkMemStoreGet_500(b *testing.B) {
-	benchmarkMemStoreGet(500, b)
-}
-
-func TestMemStoreAndLDBStore(t *testing.T) {
-	ldb, cleanup := newLDBStore(t)
-	ldb.setCapacity(4000)
-	defer cleanup()
-
-	cacheCap := 200
-	memStore := NewMemStore(NewStoreParams(4000, 200, nil, nil), nil)
-
-	tests := []struct {
-		n         int   // number of chunks to push to memStore
-		chunkSize int64 // size of chunk (by default in Swarm - 4096)
-	}{
-		{
-			n:         1,
-			chunkSize: 4096,
-		},
-		{
-			n:         101,
-			chunkSize: 4096,
-		},
-		{
-			n:         501,
-			chunkSize: 4096,
-		},
-		{
-			n:         1100,
-			chunkSize: 4096,
-		},
-	}
-
-	for i, tt := range tests {
-		log.Info("running test", "idx", i, "tt", tt)
-		var chunks []Chunk
-
-		for i := 0; i < tt.n; i++ {
-			c := GenerateRandomChunk(tt.chunkSize)
-			chunks = append(chunks, c)
-		}
-
-		for i := 0; i < tt.n; i++ {
-			err := ldb.Put(context.TODO(), chunks[i])
-			if err != nil {
-				t.Fatal(err)
-			}
-			err = memStore.Put(context.TODO(), chunks[i])
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			if got := memStore.cache.Len(); got > cacheCap {
-				t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got)
-			}
-
-		}
-
-		for i := 0; i < tt.n; i++ {
-			_, err := memStore.Get(context.TODO(), chunks[i].Address())
-			if err != nil {
-				if err == ErrChunkNotFound {
-					_, err := ldb.Get(context.TODO(), chunks[i].Address())
-					if err != nil {
-						t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err)
-					}
-				} else {
-					t.Fatalf("got error from memstore: %v", err)
-				}
-			}
-		}
-	}
-}
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index 7741b8f7b..b675384ce 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
 	"github.com/opentracing/opentracing-go"
@@ -49,8 +50,8 @@ type NetFetcher interface {
 // fetchers are unique to a chunk and are stored in fetchers LRU memory cache
 // fetchFuncFactory is a factory object to create a fetch function for a specific chunk address
 type NetStore struct {
+	chunk.Store
 	mu                sync.Mutex
-	store             SyncChunkStore
 	fetchers          *lru.Cache
 	NewNetFetcherFunc NewNetFetcherFunc
 	closeC            chan struct{}
@@ -60,13 +61,13 @@ var fetcherTimeout = 2 * time.Minute // timeout to cancel the fetcher even if re
 
 // NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a
 // constructor function that can create a fetch function for a specific chunk address.
-func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error) {
+func NewNetStore(store chunk.Store, nnf NewNetFetcherFunc) (*NetStore, error) {
 	fetchers, err := lru.New(defaultChunkRequestsCacheCapacity)
 	if err != nil {
 		return nil, err
 	}
 	return &NetStore{
-		store:             store,
+		Store:             store,
 		fetchers:          fetchers,
 		NewNetFetcherFunc: nnf,
 		closeC:            make(chan struct{}),
@@ -75,14 +76,14 @@ func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error)
 
 // Put stores a chunk in localstore, and delivers to all requestor peers using the fetcher stored in
 // the fetchers cache
-func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
+func (n *NetStore) Put(ctx context.Context, mode chunk.ModePut, ch Chunk) (bool, error) {
 	n.mu.Lock()
 	defer n.mu.Unlock()
 
 	// put the chunk to the store, there should be no error
-	err := n.store.Put(ctx, ch)
+	exists, err := n.Store.Put(ctx, mode, ch)
 	if err != nil {
-		return err
+		return exists, err
 	}
 
 	// if chunk is now put in the store, check if there was an active fetcher and call deliver on it
@@ -92,15 +93,15 @@ func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
 		log.Trace("n.getFetcher deliver", "ref", ch.Address())
 		f.deliver(ctx, ch)
 	}
-	return nil
+	return exists, nil
 }
 
 // Get retrieves the chunk from the NetStore DPA synchronously.
 // It calls NetStore.get, and if the chunk is not in local Storage
 // it calls fetch with the request, which blocks until the chunk
 // arrived or context is done
-func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) {
-	chunk, fetch, err := n.get(rctx, ref)
+func (n *NetStore) Get(rctx context.Context, mode chunk.ModeGet, ref Address) (Chunk, error) {
+	chunk, fetch, err := n.get(rctx, mode, ref)
 	if err != nil {
 		return nil, err
 	}
@@ -118,18 +119,10 @@ func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) {
 	return fetch(rctx)
 }
 
-func (n *NetStore) BinIndex(po uint8) uint64 {
-	return n.store.BinIndex(po)
-}
-
-func (n *NetStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
-	return n.store.Iterator(from, to, po, f)
-}
-
 // FetchFunc returns nil if the store contains the given address. Otherwise it returns a wait function,
 // which returns after the chunk is available or the context is done
 func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Context) error {
-	chunk, fetch, _ := n.get(ctx, ref)
+	chunk, fetch, _ := n.get(ctx, chunk.ModeGetRequest, ref)
 	if chunk != nil {
 		return nil
 	}
@@ -140,9 +133,8 @@ func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Cont
 }
 
 // Close chunk store
-func (n *NetStore) Close() {
+func (n *NetStore) Close() (err error) {
 	close(n.closeC)
-	n.store.Close()
 
 	wg := sync.WaitGroup{}
 	for _, key := range n.fetchers.Keys() {
@@ -162,6 +154,8 @@ func (n *NetStore) Close() {
 		}
 	}
 	wg.Wait()
+
+	return n.Store.Close()
 }
 
 // get attempts at retrieving the chunk from LocalStore
@@ -172,11 +166,11 @@ func (n *NetStore) Close() {
 // or all fetcher contexts are done.
 // It returns a chunk, a fetcher function and an error
 // If chunk is nil, the returned fetch function needs to be called with a context to return the chunk.
-func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Context) (Chunk, error), error) {
+func (n *NetStore) get(ctx context.Context, mode chunk.ModeGet, ref Address) (Chunk, func(context.Context) (Chunk, error), error) {
 	n.mu.Lock()
 	defer n.mu.Unlock()
 
-	chunk, err := n.store.Get(ctx, ref)
+	chunk, err := n.Store.Get(ctx, mode, ref)
 	if err != nil {
 		// TODO: Fix comparison - we should be comparing against leveldb.ErrNotFound, this error should be wrapped.
 		if err != ErrChunkNotFound && err != leveldb.ErrNotFound {
@@ -192,13 +186,6 @@ func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Co
 	return chunk, nil, nil
 }
 
-// Has is the storage layer entry point to query the underlying
-// database to return if it has a chunk or not.
-// Called from the DebugAPI
-func (n *NetStore) Has(ctx context.Context, ref Address) bool {
-	return n.store.Has(ctx, ref)
-}
-
 // getOrCreateFetcher attempts to retrieve an existing fetcher
 // if none exists, creates one and saves it in the fetchers cache
 // caller must hold the lock
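
With this change NetStore embeds chunk.Store and both Put and Get take an explicit mode, so callers state how the access should be accounted (the tests below use chunk.ModePutRequest and chunk.ModeGetRequest). A minimal, hedged sketch of the new call shape, assuming a standalone package named example and an illustrative helper storeAndFetch; the fetcher factory is left nil here and set later in real wiring, as NewSwarm does further down in this patch, so only a locally stored chunk can be read back:

	package example

	import (
		"context"

		"github.com/ethereum/go-ethereum/swarm/chunk"
		"github.com/ethereum/go-ethereum/swarm/storage"
		"github.com/ethereum/go-ethereum/swarm/storage/localstore"
	)

	// storeAndFetch puts a chunk with an explicit put mode and reads it back
	// with an explicit get mode. The nil NewNetFetcherFunc means no network
	// fetcher can be created, which is fine because the chunk is stored
	// locally before the Get.
	func storeAndFetch(dir string, ch chunk.Chunk) error {
		localStore, err := localstore.New(dir, make([]byte, 32), nil)
		if err != nil {
			return err
		}
		netStore, err := storage.NewNetStore(localStore, nil)
		if err != nil {
			localStore.Close()
			return err
		}
		// Close now also closes the embedded chunk.Store, so no separate
		// localStore.Close is needed on the happy path.
		defer netStore.Close()

		// Put reports whether the chunk was already present in the store.
		exists, err := netStore.Put(context.Background(), chunk.ModePutRequest, ch)
		if err != nil {
			return err
		}
		_ = exists

		// Get takes a retrieval mode; ModeGetRequest is what the tests below use.
		_, err = netStore.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
		return err
	}
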
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index 653877625..dc0727987 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"os"
 	"sync"
 	"testing"
 	"time"
@@ -30,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 var sourcePeerID = enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9")
@@ -76,45 +78,43 @@ func (m *mockNetFetchFuncFactory) newMockNetFetcher(ctx context.Context, _ Addre
 	return m.fetcher
 }
 
-func mustNewNetStore(t *testing.T) *NetStore {
-	netStore, _ := mustNewNetStoreWithFetcher(t)
-	return netStore
-}
-
-func mustNewNetStoreWithFetcher(t *testing.T) (*NetStore, *mockNetFetcher) {
+func newTestNetStore(t *testing.T) (netStore *NetStore, fetcher *mockNetFetcher, cleanup func()) {
 	t.Helper()
 
-	datadir, err := ioutil.TempDir("", "netstore")
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
 		t.Fatal(err)
 	}
-	naddr := make([]byte, 32)
-	params := NewDefaultLocalStoreParams()
-	params.Init(datadir)
-	params.BaseKey = naddr
-	localStore, err := NewTestLocalStoreForAddr(params)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
 	if err != nil {
+		os.RemoveAll(dir)
 		t.Fatal(err)
 	}
+	cleanup = func() {
+		localStore.Close()
+		os.RemoveAll(dir)
+	}
 
-	fetcher := &mockNetFetcher{}
+	fetcher = new(mockNetFetcher)
 	mockNetFetchFuncFactory := &mockNetFetchFuncFactory{
 		fetcher: fetcher,
 	}
-	netStore, err := NewNetStore(localStore, mockNetFetchFuncFactory.newMockNetFetcher)
+	netStore, err = NewNetStore(localStore, mockNetFetchFuncFactory.newMockNetFetcher)
 	if err != nil {
+		cleanup()
 		t.Fatal(err)
 	}
-	return netStore, fetcher
+	return netStore, fetcher, cleanup
 }
 
 // TestNetStoreGetAndPut tests calling NetStore.Get which is blocked until the same chunk is Put.
 // After the Put there should no active fetchers, and the context created for the fetcher should
 // be cancelled.
 func TestNetStoreGetAndPut(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 	defer cancel()
@@ -126,12 +126,12 @@ func TestNetStoreGetAndPut(t *testing.T) {
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
-		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+		if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
 			putErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
 			return
 		}
 
-		err := netStore.Put(ctx, chunk)
+		_, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
 		if err != nil {
 			putErrC <- fmt.Errorf("Expected no err got %v", err)
 			return
@@ -141,7 +141,7 @@ func TestNetStoreGetAndPut(t *testing.T) {
 	}()
 
 	close(c)
-	recChunk, err := netStore.Get(ctx, chunk.Address()) // this is blocked until the Put above is done
+	recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address()) // this is blocked until the Put above is done
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
@@ -150,7 +150,7 @@ func TestNetStoreGetAndPut(t *testing.T) {
 		t.Fatal(err)
 	}
 	// the retrieved chunk should be the same as what we Put
-	if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+	if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
 		t.Fatalf("Different chunk received than what was put")
 	}
 	// the chunk is already available locally, so there should be no active fetchers waiting for it
@@ -172,26 +172,27 @@ func TestNetStoreGetAndPut(t *testing.T) {
 // After the Put the chunk is available locally, so the Get can just retrieve it from LocalStore,
 // there is no need to create fetchers.
 func TestNetStoreGetAfterPut(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
 	defer cancel()
 
 	// First we Put the chunk, so the chunk will be available locally
-	err := netStore.Put(ctx, chunk)
+	_, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
 
 	// Get should retrieve the chunk from LocalStore, without creating fetcher
-	recChunk, err := netStore.Get(ctx, chunk.Address())
+	recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
 	// the retrieved chunk should be the same as what we Put
-	if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+	if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
 		t.Fatalf("Different chunk received than what was put")
 	}
 	// no fetcher offer or request should be created for a locally available chunk
@@ -207,9 +208,10 @@ func TestNetStoreGetAfterPut(t *testing.T) {
 
 // TestNetStoreGetTimeout tests a Get call for an unavailable chunk and waits for timeout
 func TestNetStoreGetTimeout(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
 	defer cancel()
@@ -221,7 +223,7 @@ func TestNetStoreGetTimeout(t *testing.T) {
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
-		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+		if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
 			fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
 			return
 		}
@@ -232,7 +234,7 @@ func TestNetStoreGetTimeout(t *testing.T) {
 	close(c)
 	// We call Get on this chunk, which is not in LocalStore. We don't Put it at all, so there will
 	// be a timeout
-	_, err := netStore.Get(ctx, chunk.Address())
+	_, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 
 	// Check if the timeout happened
 	if err != context.DeadlineExceeded {
@@ -259,9 +261,10 @@ func TestNetStoreGetTimeout(t *testing.T) {
 // TestNetStoreGetCancel tests a Get call for an unavailable chunk, then cancels the context and checks
 // the errors
 func TestNetStoreGetCancel(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 
@@ -271,7 +274,7 @@ func TestNetStoreGetCancel(t *testing.T) {
 		<-c                                // wait for the Get to be called
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
-		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+		if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
 			fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
 			return
 		}
@@ -283,7 +286,7 @@ func TestNetStoreGetCancel(t *testing.T) {
 	close(c)
 
 	// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
-	_, err := netStore.Get(ctx, chunk.Address())
+	_, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 
 	if err := <-fetcherErrC; err != nil {
 		t.Fatal(err)
@@ -311,9 +314,10 @@ func TestNetStoreGetCancel(t *testing.T) {
 // delivered with a Put, we have to make sure all Get calls return, and they use a single fetcher
 // for the chunk retrieval
 func TestNetStoreMultipleGetAndPut(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 	defer cancel()
@@ -327,7 +331,7 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
 			putErrC <- errors.New("Expected netStore to use one fetcher for all Get calls")
 			return
 		}
-		err := netStore.Put(ctx, chunk)
+		_, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
 		if err != nil {
 			putErrC <- fmt.Errorf("Expected no err got %v", err)
 			return
@@ -340,11 +344,11 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
 	errC := make(chan error)
 	for i := 0; i < count; i++ {
 		go func() {
-			recChunk, err := netStore.Get(ctx, chunk.Address())
+			recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 			if err != nil {
 				errC <- fmt.Errorf("Expected no err got %v", err)
 			}
-			if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+			if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
 				errC <- errors.New("Different chunk received than what was put")
 			}
 			errC <- nil
@@ -385,7 +389,8 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
 
 // TestNetStoreFetchFuncTimeout tests a FetchFunc call for an unavailable chunk and waits for timeout
 func TestNetStoreFetchFuncTimeout(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
 	chunk := GenerateRandomChunk(chunk.DefaultSize)
 
@@ -424,21 +429,22 @@ func TestNetStoreFetchFuncTimeout(t *testing.T) {
 
 // TestNetStoreFetchFuncAfterPut tests that the FetchFunc should return nil for a locally available chunk
 func TestNetStoreFetchFuncAfterPut(t *testing.T) {
-	netStore := mustNewNetStore(t)
+	netStore, _, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
 
 	// We deliver the created chunk with a Put
-	err := netStore.Put(ctx, chunk)
+	_, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
 
 	// FetchFunc should return nil, because the chunk is available locally, no need to fetch it
-	wait := netStore.FetchFunc(ctx, chunk.Address())
+	wait := netStore.FetchFunc(ctx, ch.Address())
 	if wait != nil {
 		t.Fatal("Expected wait to be nil")
 	}
@@ -451,16 +457,17 @@ func TestNetStoreFetchFuncAfterPut(t *testing.T) {
 
 // TestNetStoreGetCallsRequest tests if Get created a request on the NetFetcher for an unavailable chunk
 func TestNetStoreGetCallsRequest(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx := context.WithValue(context.Background(), "hopcount", uint8(5))
 	ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
 	defer cancel()
 
 	// We call get for a not available chunk, it will timeout because the chunk is not delivered
-	_, err := netStore.Get(ctx, chunk.Address())
+	_, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 
 	if err != context.DeadlineExceeded {
 		t.Fatalf("Expected context.DeadlineExceeded err got %v", err)
@@ -479,9 +486,10 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
 // TestNetStoreGetCallsOffer tests if Get created a request on the NetFetcher for an unavailable chunk
 // in case of a source peer provided in the context.
 func TestNetStoreGetCallsOffer(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	//  If a source peer is added to the context, NetStore will handle it as an offer
 	ctx := context.WithValue(context.Background(), "source", sourcePeerID.String())
@@ -489,7 +497,7 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
 	defer cancel()
 
 	// We call get for a not available chunk, it will timeout because the chunk is not delivered
-	_, err := netStore.Get(ctx, chunk.Address())
+	_, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
 
 	if err != context.DeadlineExceeded {
 		t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
@@ -513,8 +521,8 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
 // TestNetStoreFetcherCountPeers tests multiple NetStore.Get calls with peer in the context.
 // There is no Put call, so the Get calls timeout
 func TestNetStoreFetcherCountPeers(t *testing.T) {
-
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
 	addr := randomAddr()
 	peers := []string{randomAddr().Hex(), randomAddr().Hex(), randomAddr().Hex()}
@@ -529,7 +537,7 @@ func TestNetStoreFetcherCountPeers(t *testing.T) {
 		peer := peers[i]
 		go func() {
 			ctx := context.WithValue(ctx, "peer", peer)
-			_, err := netStore.Get(ctx, addr)
+			_, err := netStore.Get(ctx, chunk.ModeGetRequest, addr)
 			errC <- err
 		}()
 	}
@@ -565,21 +573,22 @@ func TestNetStoreFetcherCountPeers(t *testing.T) {
 // and checks there is still exactly one fetcher for one chunk. After the chunk is delivered, it checks
 // if the fetcher is closed.
 func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
-	chunk := GenerateRandomChunk(chunk.DefaultSize)
+	ch := GenerateRandomChunk(chunk.DefaultSize)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
 	defer cancel()
 
 	// FetchFunc should return a non-nil wait function, because the chunk is not available
-	wait := netStore.FetchFunc(ctx, chunk.Address())
+	wait := netStore.FetchFunc(ctx, ch.Address())
 	if wait == nil {
 		t.Fatal("Expected wait function to be not nil")
 	}
 
 	// There should be exactly one fetcher for the chunk
-	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+	if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
 		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
 	}
 
@@ -596,12 +605,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)
 
 	// there should be still only one fetcher, because all wait calls are for the same chunk
-	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+	if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
 		t.Fatal("Expected netStore to have one fetcher for the requested chunk")
 	}
 
 	// Deliver the chunk with a Put
-	err := netStore.Put(ctx, chunk)
+	_, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
@@ -630,7 +639,8 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
 // TestNetStoreFetcherLifeCycleWithTimeout is similar to TestNetStoreFetchFuncCalledMultipleTimes,
 // the only difference is that we don't deliver the chunk, just wait for timeout
 func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
-	netStore, fetcher := mustNewNetStoreWithFetcher(t)
+	netStore, fetcher, cleanup := newTestNetStore(t)
+	defer cleanup()
 
 	chunk := GenerateRandomChunk(chunk.DefaultSize)
 
diff --git a/swarm/storage/schema.go b/swarm/storage/schema.go
deleted file mode 100644
index 91847ca0f..000000000
--- a/swarm/storage/schema.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package storage
-
-// The DB schema we want to use. The actual/current DB schema might differ
-// until migrations are run.
-const CurrentDbSchema = DbSchemaHalloween
-
-// There was a time when we had no schema at all.
-const DbSchemaNone = ""
-
-// "purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
-const DbSchemaPurity = "purity"
-
-// "halloween" is here because we had a screw in the garbage collector index.
-// Because of that we had to rebuild the GC index to get rid of erroneous
-// entries and that takes a long time. This schema is used for bookkeeping,
-// so rebuild index will run just once.
-const DbSchemaHalloween = "halloween"
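
The schema constants are removed from package storage; the legacy-format check that callers now rely on lives in package localstore, as the swarm.go hunk below shows with localstore.IsLegacyDatabase. A small hedged sketch of that check in isolation (ensureCurrentSchema and the example package are illustrative names, not part of this patch):

	package example

	import (
		"errors"

		"github.com/ethereum/go-ethereum/swarm/storage/localstore"
	)

	// ensureCurrentSchema fails fast when the chunk database on disk still
	// uses the old LevelDB layout and therefore needs the Swarm v0.4
	// local store migration.
	func ensureCurrentSchema(chunkDBPath string) error {
		if localstore.IsLegacyDatabase(chunkDBPath) {
			return errors.New("legacy chunk database detected; see the Swarm v0.4 local store migration notes")
		}
		return nil
	}
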
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 2f39685b4..d1d47dbe8 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -178,9 +178,7 @@ func (c ChunkData) Size() uint64 {
 	return binary.LittleEndian.Uint64(c[:8])
 }
 
-type ChunkValidator interface {
-	Validate(chunk Chunk) bool
-}
+type ChunkValidator = chunk.Validator
 
 // Provides method for validation of content address in chunks
 // Holds the corresponding hasher to create the address
@@ -211,20 +209,7 @@ func (v *ContentAddressValidator) Validate(ch Chunk) bool {
 	return bytes.Equal(hash, ch.Address())
 }
 
-type ChunkStore interface {
-	Put(ctx context.Context, ch Chunk) (err error)
-	Get(rctx context.Context, ref Address) (ch Chunk, err error)
-	Has(rctx context.Context, ref Address) bool
-	Close()
-}
-
-// SyncChunkStore is a ChunkStore which supports syncing
-type SyncChunkStore interface {
-	ChunkStore
-	BinIndex(po uint8) uint64
-	Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
-	FetchFunc(ctx context.Context, ref Address) func(context.Context) error
-}
+type ChunkStore = chunk.Store
 
 // FakeChunkStore doesn't store anything, just implements the ChunkStore interface
 // It can be used to inject into a hasherStore if you don't want to actually store data just do the
@@ -233,20 +218,33 @@ type FakeChunkStore struct {
 }
 
 // Put doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error {
-	return nil
+func (f *FakeChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
+	return false, nil
 }
 
 // Has doesn't do anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Has(_ context.Context, ref Address) bool {
-	panic("FakeChunkStore doesn't support HasChunk")
+func (f *FakeChunkStore) Has(_ context.Context, ref Address) (bool, error) {
+	panic("FakeChunkStore doesn't support Has")
 }
 
 // Get doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
+func (f *FakeChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
 	panic("FakeChunkStore doesn't support Get")
 }
 
+func (f *FakeChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+	panic("FakeChunkStore doesn't support Set")
+}
+
+func (f *FakeChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+	panic("FakeChunkStore doesn't support LastPullSubscriptionBinID")
+}
+
+func (f *FakeChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+	panic("FakeChunkStore doesn't support SubscribePull")
+}
+
 // Close doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Close() {
+func (f *FakeChunkStore) Close() error {
+	return nil
 }
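
ChunkValidator and ChunkStore are now aliases for the chunk package interfaces, so existing validators keep their Validate(chunk) bool signature, but they are no longer assigned to a LocalStore field: they wrap a store instead. A hedged sketch of a custom validator composed with chunk.NewValidatorStore, mirroring the wiring NewSwarm adopts in the swarm.go hunk that follows (passValidator, newValidatedStore and the example package are illustrative names; the variadic NewValidatorStore signature is inferred from that hunk):

	package example

	import (
		"github.com/ethereum/go-ethereum/swarm/chunk"
		"github.com/ethereum/go-ethereum/swarm/storage"
		"github.com/ethereum/go-ethereum/swarm/storage/localstore"
	)

	// passValidator mirrors the boolTestValidator deleted earlier in this
	// patch: it approves or rejects every chunk according to its boolean
	// value and satisfies chunk.Validator, which storage.ChunkValidator
	// now aliases.
	type passValidator bool

	func (v passValidator) Validate(ch chunk.Chunk) bool {
		return bool(v)
	}

	// newValidatedStore composes validators around a store instead of
	// assigning them to the removed LocalStore.Validators field.
	func newValidatedStore(dir string) (chunk.Store, error) {
		localStore, err := localstore.New(dir, make([]byte, 32), nil)
		if err != nil {
			return nil, err
		}
		store := chunk.NewValidatorStore(
			localStore,
			storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
			passValidator(true),
		)
		return store, nil
	}
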
diff --git a/swarm/swarm.go b/swarm/swarm.go
index 61813e23f..7f5ee8361 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/ecdsa"
+	"errors"
 	"fmt"
 	"io"
 	"math/big"
@@ -29,6 +30,11 @@ import (
 	"time"
 	"unicode"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+
+	"github.com/ethereum/go-ethereum/swarm/storage/feed"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
+
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/contracts/chequebook"
@@ -48,7 +54,6 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/pss"
 	"github.com/ethereum/go-ethereum/swarm/state"
 	"github.com/ethereum/go-ethereum/swarm/storage"
-	"github.com/ethereum/go-ethereum/swarm/storage/feed"
 	"github.com/ethereum/go-ethereum/swarm/storage/mock"
 	"github.com/ethereum/go-ethereum/swarm/swap"
 	"github.com/ethereum/go-ethereum/swarm/tracing"
@@ -143,11 +148,31 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 		resolver = api.NewMultiResolver(opts...)
 		self.dns = resolver
 	}
+	// check that the chunk database on disk does not use the old (legacy) schema;
+	// if it does, fail and exit so the node operator can run the migration
+	isLegacy := localstore.IsLegacyDatabase(config.ChunkDbPath)
+
+	if isLegacy {
+		return nil, errors.New("Legacy database format detected! Please read the migration announcement at: https://github.com/ethersphere/go-ethereum/wiki/Swarm-v0.4-local-store-migration")
+	}
+
+	var feedsHandler *feed.Handler
+	fhParams := &feed.HandlerParams{}
+
+	feedsHandler = feed.NewHandler(fhParams)
 
-	lstore, err := storage.NewLocalStore(config.LocalStoreParams, mockStore)
+	localStore, err := localstore.New(config.ChunkDbPath, config.BaseKey, &localstore.Options{
+		MockStore: mockStore,
+		Capacity:  config.DbCapacity,
+	})
 	if err != nil {
 		return nil, err
 	}
+	lstore := chunk.NewValidatorStore(
+		localStore,
+		storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
+		feedsHandler,
+	)
 
 	self.netStore, err = storage.NewNetStore(lstore, nil)
 	if err != nil {
@@ -161,6 +186,8 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 	delivery := stream.NewDelivery(to, self.netStore)
 	self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New
 
+	feedsHandler.SetStore(self.netStore)
+
 	if config.SwapEnabled {
 		balancesStore, err := state.NewDBStore(filepath.Join(config.Path, "balances.db"))
 		if err != nil {
@@ -194,22 +221,6 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 	// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
 	self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
 
-	var feedsHandler *feed.Handler
-	fhParams := &feed.HandlerParams{}
-
-	feedsHandler = feed.NewHandler(fhParams)
-	feedsHandler.SetStore(self.netStore)
-
-	lstore.Validators = []storage.ChunkValidator{
-		storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
-		feedsHandler,
-	}
-
-	err = lstore.Migrate()
-	if err != nil {
-		return nil, err
-	}
-
 	log.Debug("Setup local storage")
 
 	self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
-- 
GitLab